PR robscott: Adding EndpointSlice API util to simplify migration from Endpoints
Result: FAILURE
Tests: 1 failed / 2898 succeeded
Started: 2019-11-09 00:46
Elapsed: 25m7s
Revision: 66a8819349c668f4068daddda4e1a13f44a48aff
Refs: 84658

Test Failures


k8s.io/kubernetes/test/integration/scheduler TestPreemption 34s

go test -v k8s.io/kubernetes/test/integration/scheduler -run TestPreemption$
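To reproduce locally, the scheduler integration tests need an etcd binary available; a minimal sketch of one common invocation from a kubernetes/kubernetes checkout is below (the install-etcd.sh helper, third_party/etcd path, and the WHAT/KUBE_TEST_ARGS make variables are assumptions about the repo tooling at this revision and may differ by release):

# install the pinned etcd version used by integration tests and put it on PATH
./hack/install-etcd.sh
export PATH="$(pwd)/third_party/etcd:${PATH}"
# run only TestPreemption from the scheduler integration package
make test-integration WHAT=./test/integration/scheduler KUBE_TEST_ARGS="-run TestPreemption$"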
=== RUN   TestPreemption
W1109 01:08:03.298204  109746 services.go:37] No CIDR for service cluster IPs specified. Default value which was 10.0.0.0/24 is deprecated and will be removed in future releases. Please specify it using --service-cluster-ip-range on kube-apiserver.
I1109 01:08:03.298236  109746 services.go:51] Setting service IP to "10.0.0.1" (read-write).
I1109 01:08:03.298251  109746 master.go:309] Node port range unspecified. Defaulting to 30000-32767.
I1109 01:08:03.298264  109746 master.go:265] Using reconciler: 
I1109 01:08:03.300139  109746 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.300441  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.300487  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.301590  109746 store.go:1342] Monitoring podtemplates count at <storage-prefix>//podtemplates
I1109 01:08:03.301669  109746 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.302040  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.302071  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.302165  109746 reflector.go:188] Listing and watching *core.PodTemplate from storage/cacher.go:/podtemplates
I1109 01:08:03.303244  109746 store.go:1342] Monitoring events count at <storage-prefix>//events
I1109 01:08:03.303302  109746 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.303438  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.303456  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.303548  109746 reflector.go:188] Listing and watching *core.Event from storage/cacher.go:/events
I1109 01:08:03.305130  109746 watch_cache.go:409] Replace watchCache (rev: 32257) 
I1109 01:08:03.305756  109746 store.go:1342] Monitoring limitranges count at <storage-prefix>//limitranges
I1109 01:08:03.305899  109746 reflector.go:188] Listing and watching *core.LimitRange from storage/cacher.go:/limitranges
I1109 01:08:03.306419  109746 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.306600  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.306621  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.307556  109746 store.go:1342] Monitoring resourcequotas count at <storage-prefix>//resourcequotas
I1109 01:08:03.307602  109746 reflector.go:188] Listing and watching *core.ResourceQuota from storage/cacher.go:/resourcequotas
I1109 01:08:03.307775  109746 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.307912  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.307936  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.308604  109746 store.go:1342] Monitoring secrets count at <storage-prefix>//secrets
I1109 01:08:03.308704  109746 reflector.go:188] Listing and watching *core.Secret from storage/cacher.go:/secrets
I1109 01:08:03.309444  109746 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.309712  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.309790  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.310856  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.311266  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.311414  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.311803  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.312295  109746 store.go:1342] Monitoring persistentvolumes count at <storage-prefix>//persistentvolumes
I1109 01:08:03.312511  109746 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.312670  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.312691  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.312764  109746 reflector.go:188] Listing and watching *core.PersistentVolume from storage/cacher.go:/persistentvolumes
I1109 01:08:03.314373  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.314773  109746 store.go:1342] Monitoring persistentvolumeclaims count at <storage-prefix>//persistentvolumeclaims
I1109 01:08:03.314823  109746 reflector.go:188] Listing and watching *core.PersistentVolumeClaim from storage/cacher.go:/persistentvolumeclaims
I1109 01:08:03.314948  109746 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.315067  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.315081  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.316534  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.317210  109746 store.go:1342] Monitoring configmaps count at <storage-prefix>//configmaps
I1109 01:08:03.317277  109746 reflector.go:188] Listing and watching *core.ConfigMap from storage/cacher.go:/configmaps
I1109 01:08:03.317436  109746 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.317620  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.317650  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.318365  109746 store.go:1342] Monitoring namespaces count at <storage-prefix>//namespaces
I1109 01:08:03.318455  109746 reflector.go:188] Listing and watching *core.Namespace from storage/cacher.go:/namespaces
I1109 01:08:03.318637  109746 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.318785  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.318806  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.318936  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.319349  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.319837  109746 store.go:1342] Monitoring endpoints count at <storage-prefix>//services/endpoints
I1109 01:08:03.319925  109746 reflector.go:188] Listing and watching *core.Endpoints from storage/cacher.go:/services/endpoints
I1109 01:08:03.320041  109746 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.320151  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.320205  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.320893  109746 store.go:1342] Monitoring nodes count at <storage-prefix>//minions
I1109 01:08:03.321072  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.321150  109746 reflector.go:188] Listing and watching *core.Node from storage/cacher.go:/minions
I1109 01:08:03.321134  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.321397  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.321429  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.322092  109746 store.go:1342] Monitoring pods count at <storage-prefix>//pods
I1109 01:08:03.322145  109746 reflector.go:188] Listing and watching *core.Pod from storage/cacher.go:/pods
I1109 01:08:03.322304  109746 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.322435  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.322458  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.323382  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.323424  109746 store.go:1342] Monitoring serviceaccounts count at <storage-prefix>//serviceaccounts
I1109 01:08:03.323478  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.323565  109746 reflector.go:188] Listing and watching *core.ServiceAccount from storage/cacher.go:/serviceaccounts
I1109 01:08:03.323601  109746 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.323709  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.323726  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.324474  109746 store.go:1342] Monitoring services count at <storage-prefix>//services/specs
I1109 01:08:03.324506  109746 reflector.go:188] Listing and watching *core.Service from storage/cacher.go:/services/specs
I1109 01:08:03.324520  109746 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.324817  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.325105  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.325122  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.325362  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.326153  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.326204  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.327401  109746 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.327530  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.327557  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.328160  109746 store.go:1342] Monitoring replicationcontrollers count at <storage-prefix>//controllers
I1109 01:08:03.328221  109746 rest.go:115] the default service ipfamily for this cluster is: IPv4
I1109 01:08:03.328230  109746 reflector.go:188] Listing and watching *core.ReplicationController from storage/cacher.go:/controllers
I1109 01:08:03.328728  109746 storage_factory.go:285] storing bindings in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.328803  109746 watch_cache.go:409] Replace watchCache (rev: 32259) 
I1109 01:08:03.328972  109746 storage_factory.go:285] storing componentstatuses in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.329828  109746 storage_factory.go:285] storing configmaps in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.330378  109746 storage_factory.go:285] storing endpoints in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.331170  109746 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.331801  109746 storage_factory.go:285] storing limitranges in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.332141  109746 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.332300  109746 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.332482  109746 storage_factory.go:285] storing namespaces in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.332837  109746 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.333296  109746 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.333452  109746 storage_factory.go:285] storing nodes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.334553  109746 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.334942  109746 storage_factory.go:285] storing persistentvolumeclaims in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.335781  109746 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.336169  109746 storage_factory.go:285] storing persistentvolumes in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.336971  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.337318  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.337580  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.337798  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.338017  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.338174  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.338658  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.339320  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.339777  109746 storage_factory.go:285] storing pods in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.340709  109746 storage_factory.go:285] storing podtemplates in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.341529  109746 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.341885  109746 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.342074  109746 storage_factory.go:285] storing replicationcontrollers in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.342745  109746 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.342997  109746 storage_factory.go:285] storing resourcequotas in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.343567  109746 storage_factory.go:285] storing secrets in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.344062  109746 storage_factory.go:285] storing serviceaccounts in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.344551  109746 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.345357  109746 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.345797  109746 storage_factory.go:285] storing services in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.345922  109746 master.go:493] Skipping disabled API group "auditregistration.k8s.io".
I1109 01:08:03.345936  109746 master.go:504] Enabling API group "authentication.k8s.io".
I1109 01:08:03.345951  109746 master.go:504] Enabling API group "authorization.k8s.io".
I1109 01:08:03.346171  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.346357  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.346377  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.350060  109746 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 01:08:03.350224  109746 reflector.go:188] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 01:08:03.350424  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.351426  109746 watch_cache.go:409] Replace watchCache (rev: 32260) 
I1109 01:08:03.352741  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.352828  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.356019  109746 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 01:08:03.356216  109746 reflector.go:188] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 01:08:03.357393  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.357593  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.357627  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.358564  109746 store.go:1342] Monitoring horizontalpodautoscalers.autoscaling count at <storage-prefix>//horizontalpodautoscalers
I1109 01:08:03.358600  109746 master.go:504] Enabling API group "autoscaling".
I1109 01:08:03.358630  109746 reflector.go:188] Listing and watching *autoscaling.HorizontalPodAutoscaler from storage/cacher.go:/horizontalpodautoscalers
I1109 01:08:03.358840  109746 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.359008  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.359039  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.359631  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.359963  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.360128  109746 store.go:1342] Monitoring jobs.batch count at <storage-prefix>//jobs
I1109 01:08:03.360212  109746 reflector.go:188] Listing and watching *batch.Job from storage/cacher.go:/jobs
I1109 01:08:03.361021  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.361100  109746 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.361238  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.361260  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.361976  109746 store.go:1342] Monitoring cronjobs.batch count at <storage-prefix>//cronjobs
I1109 01:08:03.362009  109746 master.go:504] Enabling API group "batch".
I1109 01:08:03.362030  109746 reflector.go:188] Listing and watching *batch.CronJob from storage/cacher.go:/cronjobs
I1109 01:08:03.362253  109746 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.362443  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.362479  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.362810  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.363523  109746 store.go:1342] Monitoring certificatesigningrequests.certificates.k8s.io count at <storage-prefix>//certificatesigningrequests
I1109 01:08:03.363556  109746 master.go:504] Enabling API group "certificates.k8s.io".
I1109 01:08:03.363556  109746 reflector.go:188] Listing and watching *certificates.CertificateSigningRequest from storage/cacher.go:/certificatesigningrequests
I1109 01:08:03.364040  109746 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.364277  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.364435  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.364539  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.365540  109746 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I1109 01:08:03.365583  109746 reflector.go:188] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I1109 01:08:03.365935  109746 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.366144  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.366163  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.366655  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.367534  109746 store.go:1342] Monitoring leases.coordination.k8s.io count at <storage-prefix>//leases
I1109 01:08:03.367566  109746 master.go:504] Enabling API group "coordination.k8s.io".
I1109 01:08:03.367579  109746 master.go:493] Skipping disabled API group "discovery.k8s.io".
I1109 01:08:03.367701  109746 reflector.go:188] Listing and watching *coordination.Lease from storage/cacher.go:/leases
I1109 01:08:03.367756  109746 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.368210  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.368236  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.369029  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.370012  109746 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I1109 01:08:03.370119  109746 master.go:504] Enabling API group "extensions".
I1109 01:08:03.370061  109746 reflector.go:188] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I1109 01:08:03.370417  109746 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.370577  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.370650  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.371090  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.371976  109746 store.go:1342] Monitoring networkpolicies.networking.k8s.io count at <storage-prefix>//networkpolicies
I1109 01:08:03.372086  109746 reflector.go:188] Listing and watching *networking.NetworkPolicy from storage/cacher.go:/networkpolicies
I1109 01:08:03.372992  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.373342  109746 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.373460  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.373482  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.374012  109746 store.go:1342] Monitoring ingresses.networking.k8s.io count at <storage-prefix>//ingress
I1109 01:08:03.374112  109746 master.go:504] Enabling API group "networking.k8s.io".
I1109 01:08:03.374228  109746 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.374430  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.374527  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.374066  109746 reflector.go:188] Listing and watching *networking.Ingress from storage/cacher.go:/ingress
I1109 01:08:03.375753  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.375782  109746 store.go:1342] Monitoring runtimeclasses.node.k8s.io count at <storage-prefix>//runtimeclasses
I1109 01:08:03.376156  109746 master.go:504] Enabling API group "node.k8s.io".
I1109 01:08:03.375803  109746 reflector.go:188] Listing and watching *node.RuntimeClass from storage/cacher.go:/runtimeclasses
I1109 01:08:03.376601  109746 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.376833  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.376919  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.377205  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.378028  109746 store.go:1342] Monitoring poddisruptionbudgets.policy count at <storage-prefix>//poddisruptionbudgets
I1109 01:08:03.378087  109746 reflector.go:188] Listing and watching *policy.PodDisruptionBudget from storage/cacher.go:/poddisruptionbudgets
I1109 01:08:03.378442  109746 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.378626  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.378721  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.379209  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.379571  109746 store.go:1342] Monitoring podsecuritypolicies.policy count at <storage-prefix>//podsecuritypolicy
I1109 01:08:03.379600  109746 reflector.go:188] Listing and watching *policy.PodSecurityPolicy from storage/cacher.go:/podsecuritypolicy
I1109 01:08:03.379612  109746 master.go:504] Enabling API group "policy".
I1109 01:08:03.379707  109746 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.379894  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.379929  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.380721  109746 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I1109 01:08:03.381004  109746 reflector.go:188] Listing and watching *rbac.Role from storage/cacher.go:/roles
I1109 01:08:03.381027  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.380997  109746 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.381346  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.381457  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.382158  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.382407  109746 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I1109 01:08:03.382483  109746 reflector.go:188] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I1109 01:08:03.382566  109746 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.382794  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.382820  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.383654  109746 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I1109 01:08:03.383681  109746 reflector.go:188] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I1109 01:08:03.383976  109746 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.384106  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.384125  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.384524  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.384963  109746 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I1109 01:08:03.385032  109746 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.385057  109746 reflector.go:188] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I1109 01:08:03.385154  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.385170  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.385915  109746 store.go:1342] Monitoring roles.rbac.authorization.k8s.io count at <storage-prefix>//roles
I1109 01:08:03.386085  109746 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.386225  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.386255  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.386334  109746 reflector.go:188] Listing and watching *rbac.Role from storage/cacher.go:/roles
I1109 01:08:03.386517  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.386524  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.387528  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.387549  109746 reflector.go:188] Listing and watching *rbac.RoleBinding from storage/cacher.go:/rolebindings
I1109 01:08:03.387532  109746 store.go:1342] Monitoring rolebindings.rbac.authorization.k8s.io count at <storage-prefix>//rolebindings
I1109 01:08:03.387709  109746 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.387987  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.388058  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.388685  109746 store.go:1342] Monitoring clusterroles.rbac.authorization.k8s.io count at <storage-prefix>//clusterroles
I1109 01:08:03.388753  109746 reflector.go:188] Listing and watching *rbac.ClusterRole from storage/cacher.go:/clusterroles
I1109 01:08:03.388864  109746 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.388986  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.389012  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.389108  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.389448  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.389639  109746 store.go:1342] Monitoring clusterrolebindings.rbac.authorization.k8s.io count at <storage-prefix>//clusterrolebindings
I1109 01:08:03.389676  109746 master.go:504] Enabling API group "rbac.authorization.k8s.io".
I1109 01:08:03.389706  109746 reflector.go:188] Listing and watching *rbac.ClusterRoleBinding from storage/cacher.go:/clusterrolebindings
I1109 01:08:03.390565  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.391832  109746 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.391983  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.392058  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.392946  109746 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I1109 01:08:03.393046  109746 reflector.go:188] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I1109 01:08:03.393230  109746 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.393553  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.393634  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.393960  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.394882  109746 store.go:1342] Monitoring priorityclasses.scheduling.k8s.io count at <storage-prefix>//priorityclasses
I1109 01:08:03.394974  109746 master.go:504] Enabling API group "scheduling.k8s.io".
I1109 01:08:03.394893  109746 reflector.go:188] Listing and watching *scheduling.PriorityClass from storage/cacher.go:/priorityclasses
I1109 01:08:03.395301  109746 master.go:493] Skipping disabled API group "settings.k8s.io".
I1109 01:08:03.395909  109746 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.395946  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.396120  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.396270  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.397163  109746 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I1109 01:08:03.397240  109746 reflector.go:188] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I1109 01:08:03.397407  109746 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.397603  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.397635  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.398300  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.398557  109746 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I1109 01:08:03.398588  109746 reflector.go:188] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I1109 01:08:03.398820  109746 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.399209  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.399323  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.399746  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.400129  109746 store.go:1342] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I1109 01:08:03.400210  109746 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.400245  109746 reflector.go:188] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I1109 01:08:03.400357  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.400381  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.402069  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.402634  109746 store.go:1342] Monitoring csidrivers.storage.k8s.io count at <storage-prefix>//csidrivers
I1109 01:08:03.402818  109746 reflector.go:188] Listing and watching *storage.CSIDriver from storage/cacher.go:/csidrivers
I1109 01:08:03.403774  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.403774  109746 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.404444  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.404588  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.405406  109746 store.go:1342] Monitoring storageclasses.storage.k8s.io count at <storage-prefix>//storageclasses
I1109 01:08:03.405577  109746 reflector.go:188] Listing and watching *storage.StorageClass from storage/cacher.go:/storageclasses
I1109 01:08:03.405981  109746 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.406393  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.406654  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.406488  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.412982  109746 store.go:1342] Monitoring volumeattachments.storage.k8s.io count at <storage-prefix>//volumeattachments
I1109 01:08:03.413042  109746 reflector.go:188] Listing and watching *storage.VolumeAttachment from storage/cacher.go:/volumeattachments
I1109 01:08:03.413068  109746 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.413262  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.413293  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.414494  109746 store.go:1342] Monitoring csinodes.storage.k8s.io count at <storage-prefix>//csinodes
I1109 01:08:03.414529  109746 master.go:504] Enabling API group "storage.k8s.io".
I1109 01:08:03.414582  109746 reflector.go:188] Listing and watching *storage.CSINode from storage/cacher.go:/csinodes
I1109 01:08:03.414741  109746 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.414867  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.414884  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.415529  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.415772  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.417103  109746 store.go:1342] Monitoring deployments.apps count at <storage-prefix>//deployments
I1109 01:08:03.417127  109746 reflector.go:188] Listing and watching *apps.Deployment from storage/cacher.go:/deployments
I1109 01:08:03.417419  109746 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.417883  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.417961  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.418370  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.419295  109746 store.go:1342] Monitoring statefulsets.apps count at <storage-prefix>//statefulsets
I1109 01:08:03.419353  109746 reflector.go:188] Listing and watching *apps.StatefulSet from storage/cacher.go:/statefulsets
I1109 01:08:03.420320  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.424490  109746 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.424737  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.424822  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.425793  109746 store.go:1342] Monitoring daemonsets.apps count at <storage-prefix>//daemonsets
I1109 01:08:03.425921  109746 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.425942  109746 reflector.go:188] Listing and watching *apps.DaemonSet from storage/cacher.go:/daemonsets
I1109 01:08:03.426005  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.426025  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.427112  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.427146  109746 store.go:1342] Monitoring replicasets.apps count at <storage-prefix>//replicasets
I1109 01:08:03.427213  109746 reflector.go:188] Listing and watching *apps.ReplicaSet from storage/cacher.go:/replicasets
I1109 01:08:03.427442  109746 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.427666  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.427711  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.428757  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.429091  109746 store.go:1342] Monitoring controllerrevisions.apps count at <storage-prefix>//controllerrevisions
I1109 01:08:03.429113  109746 master.go:504] Enabling API group "apps".
I1109 01:08:03.429166  109746 reflector.go:188] Listing and watching *apps.ControllerRevision from storage/cacher.go:/controllerrevisions
I1109 01:08:03.429168  109746 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.429492  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.429570  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.430288  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.430513  109746 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I1109 01:08:03.430612  109746 reflector.go:188] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I1109 01:08:03.430596  109746 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.430849  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.430881  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.431711  109746 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I1109 01:08:03.431784  109746 reflector.go:188] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I1109 01:08:03.431782  109746 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.431861  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.431957  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.431984  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.432720  109746 store.go:1342] Monitoring validatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//validatingwebhookconfigurations
I1109 01:08:03.432727  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.432804  109746 reflector.go:188] Listing and watching *admissionregistration.ValidatingWebhookConfiguration from storage/cacher.go:/validatingwebhookconfigurations
I1109 01:08:03.432803  109746 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.432998  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.433027  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.433666  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.433801  109746 store.go:1342] Monitoring mutatingwebhookconfigurations.admissionregistration.k8s.io count at <storage-prefix>//mutatingwebhookconfigurations
I1109 01:08:03.433886  109746 master.go:504] Enabling API group "admissionregistration.k8s.io".
I1109 01:08:03.434008  109746 storage_factory.go:285] storing events in v1, reading as __internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.433926  109746 reflector.go:188] Listing and watching *admissionregistration.MutatingWebhookConfiguration from storage/cacher.go:/mutatingwebhookconfigurations
I1109 01:08:03.434436  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:03.434530  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:03.435433  109746 store.go:1342] Monitoring events count at <storage-prefix>//events
I1109 01:08:03.435463  109746 master.go:504] Enabling API group "events.k8s.io".
I1109 01:08:03.435567  109746 reflector.go:188] Listing and watching *core.Event from storage/cacher.go:/events
I1109 01:08:03.435759  109746 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.436024  109746 storage_factory.go:285] storing tokenreviews.authentication.k8s.io in authentication.k8s.io/v1, reading as authentication.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.436378  109746 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.436538  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.436536  109746 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.436673  109746 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.436799  109746 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.437003  109746 storage_factory.go:285] storing localsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.437122  109746 storage_factory.go:285] storing selfsubjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.437280  109746 storage_factory.go:285] storing selfsubjectrulesreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.437403  109746 storage_factory.go:285] storing subjectaccessreviews.authorization.k8s.io in authorization.k8s.io/v1, reading as authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.438061  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.438317  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.438955  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.439145  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.439866  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.440063  109746 storage_factory.go:285] storing horizontalpodautoscalers.autoscaling in autoscaling/v1, reading as autoscaling/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.440658  109746 watch_cache.go:409] Replace watchCache (rev: 32261) 
I1109 01:08:03.440767  109746 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.441028  109746 storage_factory.go:285] storing jobs.batch in batch/v1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.441621  109746 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.441851  109746 storage_factory.go:285] storing cronjobs.batch in batch/v1beta1, reading as batch/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.441894  109746 genericapiserver.go:404] Skipping API batch/v2alpha1 because it has no resources.
I1109 01:08:03.442426  109746 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.442541  109746 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.442701  109746 storage_factory.go:285] storing certificatesigningrequests.certificates.k8s.io in certificates.k8s.io/v1beta1, reading as certificates.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.443547  109746 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.444216  109746 storage_factory.go:285] storing leases.coordination.k8s.io in coordination.k8s.io/v1beta1, reading as coordination.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.444894  109746 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.445157  109746 storage_factory.go:285] storing ingresses.extensions in extensions/v1beta1, reading as extensions/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.446216  109746 storage_factory.go:285] storing networkpolicies.networking.k8s.io in networking.k8s.io/v1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.446916  109746 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.447119  109746 storage_factory.go:285] storing ingresses.networking.k8s.io in networking.k8s.io/v1beta1, reading as networking.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.447978  109746 storage_factory.go:285] storing runtimeclasses.node.k8s.io in node.k8s.io/v1beta1, reading as node.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.448098  109746 genericapiserver.go:404] Skipping API node.k8s.io/v1alpha1 because it has no resources.
I1109 01:08:03.448929  109746 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.449251  109746 storage_factory.go:285] storing poddisruptionbudgets.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.449660  109746 storage_factory.go:285] storing podsecuritypolicies.policy in policy/v1beta1, reading as policy/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.450309  109746 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.450712  109746 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.451160  109746 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.451728  109746 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.452376  109746 storage_factory.go:285] storing clusterrolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.452906  109746 storage_factory.go:285] storing clusterroles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.453766  109746 storage_factory.go:285] storing rolebindings.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.454378  109746 storage_factory.go:285] storing roles.rbac.authorization.k8s.io in rbac.authorization.k8s.io/v1, reading as rbac.authorization.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.454515  109746 genericapiserver.go:404] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
I1109 01:08:03.455045  109746 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.455700  109746 storage_factory.go:285] storing priorityclasses.scheduling.k8s.io in scheduling.k8s.io/v1, reading as scheduling.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.455927  109746 genericapiserver.go:404] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
I1109 01:08:03.456584  109746 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.457215  109746 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.457754  109746 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.458082  109746 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.458775  109746 storage_factory.go:285] storing csidrivers.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.459529  109746 storage_factory.go:285] storing csinodes.storage.k8s.io in storage.k8s.io/v1beta1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.460224  109746 storage_factory.go:285] storing storageclasses.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.461004  109746 storage_factory.go:285] storing volumeattachments.storage.k8s.io in storage.k8s.io/v1, reading as storage.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.461211  109746 genericapiserver.go:404] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
I1109 01:08:03.466677  109746 storage_factory.go:285] storing controllerrevisions.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.467589  109746 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.468144  109746 storage_factory.go:285] storing daemonsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.468931  109746 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.469443  109746 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.469805  109746 storage_factory.go:285] storing deployments.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.470683  109746 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.471066  109746 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.471447  109746 storage_factory.go:285] storing replicasets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.472365  109746 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.472742  109746 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.473230  109746 storage_factory.go:285] storing statefulsets.apps in apps/v1, reading as apps/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
W1109 01:08:03.473395  109746 genericapiserver.go:404] Skipping API apps/v1beta2 because it has no resources.
W1109 01:08:03.473465  109746 genericapiserver.go:404] Skipping API apps/v1beta1 because it has no resources.
I1109 01:08:03.474038  109746 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.474703  109746 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.475354  109746 storage_factory.go:285] storing mutatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.475926  109746 storage_factory.go:285] storing validatingwebhookconfigurations.admissionregistration.k8s.io in admissionregistration.k8s.io/v1beta1, reading as admissionregistration.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.476568  109746 storage_factory.go:285] storing events.events.k8s.io in events.k8s.io/v1beta1, reading as events.k8s.io/__internal from storagebackend.Config{Type:"", Prefix:"18091606-1f4c-4ba4-beeb-db514c627f25", Transport:storagebackend.TransportConfig{ServerList:[]string{"http://127.0.0.1:2379"}, KeyFile:"", CertFile:"", TrustedCAFile:"", EgressLookup:(egressselector.Lookup)(nil)}, Paging:true, Codec:runtime.Codec(nil), EncodeVersioner:runtime.GroupVersioner(nil), Transformer:value.Transformer(nil), CompactionInterval:300000000000, CountMetricPollPeriod:60000000000}
I1109 01:08:03.479827  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.479887  109746 healthz.go:177] healthz check poststarthook/bootstrap-controller failed: not finished
I1109 01:08:03.479899  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.479911  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.479918  109746 healthz.go:177] healthz check poststarthook/start-cluster-authentication-info-controller failed: not finished
I1109 01:08:03.479925  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/start-cluster-authentication-info-controller failed: reason withheld
healthz check failed
I1109 01:08:03.479986  109746 httplog.go:90] GET /healthz: (350.855µs) 0 [Go-http-client/1.1 127.0.0.1:43188]
W1109 01:08:03.479845  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
I1109 01:08:03.480168  109746 cluster_authentication_trust_controller.go:440] Starting cluster_authentication_trust_controller controller
I1109 01:08:03.480220  109746 shared_informer.go:197] Waiting for caches to sync for cluster_authentication_trust_controller
I1109 01:08:03.480420  109746 reflector.go:153] Starting reflector *v1.ConfigMap (12h0m0s) from k8s.io/kubernetes/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go:444
I1109 01:08:03.480431  109746 reflector.go:188] Listing and watching *v1.ConfigMap from k8s.io/kubernetes/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go:444
I1109 01:08:03.481576  109746 httplog.go:90] GET /api/v1/namespaces/kube-system/configmaps?limit=500&resourceVersion=0: (638.468µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43198]
I1109 01:08:03.482471  109746 get.go:251] Starting watch for /api/v1/namespaces/kube-system/configmaps, rv=32259 labels= fields= timeout=7m46s
I1109 01:08:03.484431  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (4.11645ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:03.488366  109746 httplog.go:90] GET /api/v1/services: (2.31152ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:03.496396  109746 httplog.go:90] GET /api/v1/services: (1.747662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:03.498524  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.498732  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.498894  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.499022  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.499356  109746 httplog.go:90] GET /healthz: (982.988µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:03.500432  109746 httplog.go:90] GET /api/v1/services: (1.062825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.500734  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.868787ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43190]
I1109 01:08:03.500773  109746 httplog.go:90] GET /api/v1/services: (1.196782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43224]
I1109 01:08:03.502603  109746 httplog.go:90] POST /api/v1/namespaces: (1.296383ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.503677  109746 httplog.go:90] GET /api/v1/namespaces/kube-public: (686.619µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.505731  109746 httplog.go:90] POST /api/v1/namespaces: (1.659531ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.507478  109746 httplog.go:90] GET /api/v1/namespaces/kube-node-lease: (1.420075ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.509561  109746 httplog.go:90] POST /api/v1/namespaces: (1.609064ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.580443  109746 shared_informer.go:227] caches populated
I1109 01:08:03.580472  109746 shared_informer.go:204] Caches are synced for cluster_authentication_trust_controller 
I1109 01:08:03.580969  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.581062  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.581077  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.581087  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.581127  109746 httplog.go:90] GET /healthz: (471.522µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:03.601616  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.601877  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.602065  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.602276  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.603073  109746 httplog.go:90] GET /healthz: (1.302865ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.680743  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.680871  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.680941  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.680980  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.681123  109746 httplog.go:90] GET /healthz: (539.362µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:03.700229  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.700265  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.700278  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.700288  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.700325  109746 httplog.go:90] GET /healthz: (294.37µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.780787  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.780818  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.780831  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.780841  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.780891  109746 httplog.go:90] GET /healthz: (273.823µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:03.800362  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.800394  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.800414  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.800424  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.800474  109746 httplog.go:90] GET /healthz: (322.445µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.880715  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.880752  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.880765  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.880776  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.880815  109746 httplog.go:90] GET /healthz: (267.378µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:03.900235  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.900271  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.900285  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.900295  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.900346  109746 httplog.go:90] GET /healthz: (305.425µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:03.980689  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:03.980728  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:03.980738  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:03.980746  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:03.980772  109746 httplog.go:90] GET /healthz: (209.121µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:04.000169  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.000245  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.000259  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.000269  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.000304  109746 httplog.go:90] GET /healthz: (296.476µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.080734  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.080779  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.080793  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.080803  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.080832  109746 httplog.go:90] GET /healthz: (266.102µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:04.100231  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.100279  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.100293  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.100303  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.100343  109746 httplog.go:90] GET /healthz: (288.158µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.180742  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.180777  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.180790  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.180801  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.180844  109746 httplog.go:90] GET /healthz: (259.242µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:04.200301  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.200339  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.200355  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.200365  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.200396  109746 httplog.go:90] GET /healthz: (290.676µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.280730  109746 healthz.go:177] healthz check etcd failed: etcd client connection not yet established
I1109 01:08:04.280770  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.280784  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.280795  109746 healthz.go:191] [+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.280837  109746 httplog.go:90] GET /healthz: (282.767µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:04.297991  109746 client.go:361] parsed scheme: "endpoint"
I1109 01:08:04.298109  109746 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 01:08:04.301462  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.301493  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.301505  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.301559  109746 httplog.go:90] GET /healthz: (1.534546ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.381473  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.381512  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.381524  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.381577  109746 httplog.go:90] GET /healthz: (978.988µs) 0 [Go-http-client/1.1 127.0.0.1:43222]
I1109 01:08:04.401164  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.401403  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.401423  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.401480  109746 httplog.go:90] GET /healthz: (1.473946ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.482080  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.159959ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.482080  109746 httplog.go:90] GET /apis/scheduling.k8s.io/v1/priorityclasses/system-node-critical: (2.170392ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.484683  109746 httplog.go:90] POST /apis/scheduling.k8s.io/v1/priorityclasses: (1.984185ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.484718  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.96287ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.484861  109746 storage_scheduling.go:133] created PriorityClass system-node-critical with value 2000001000
I1109 01:08:04.485627  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.485652  109746 healthz.go:177] healthz check poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished
I1109 01:08:04.485662  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.485697  109746 httplog.go:90] GET /healthz: (3.518632ms) 0 [Go-http-client/1.1 127.0.0.1:43498]
I1109 01:08:04.488571  109746 httplog.go:90] GET /apis/scheduling.k8s.io/v1/priorityclasses/system-cluster-critical: (2.7707ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43222]
I1109 01:08:04.488571  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (3.005926ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.491477  109746 httplog.go:90] POST /apis/scheduling.k8s.io/v1/priorityclasses: (1.715205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43498]
I1109 01:08:04.491660  109746 storage_scheduling.go:133] created PriorityClass system-cluster-critical with value 2000000000
I1109 01:08:04.491675  109746 storage_scheduling.go:142] all system priority classes are created successfully or already exist.
I1109 01:08:04.493609  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (3.884522ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.495228  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (818.748µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.496588  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (1.012097ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.497946  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.073992ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.504860  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.504888  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (6.31944ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.504889  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.504970  109746 httplog.go:90] GET /healthz: (4.632856ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43498]
I1109 01:08:04.506015  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (869.828µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.508538  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin: (2.163355ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.510792  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.798939ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.511433  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/cluster-admin
I1109 01:08:04.512542  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:discovery: (904.021µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.515559  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.696922ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.515993  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:discovery
I1109 01:08:04.517263  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:basic-user: (1.017995ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.519363  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.608242ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.519660  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:basic-user
I1109 01:08:04.520744  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:public-info-viewer: (885.968µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.522586  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.461335ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.522793  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:public-info-viewer
I1109 01:08:04.523708  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin: (756.486µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.525557  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.521183ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.525755  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/admin
I1109 01:08:04.528654  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit: (2.710119ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.530993  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.632082ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.531223  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/edit
I1109 01:08:04.532254  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view: (867.216µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.534591  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.970835ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.534892  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/view
I1109 01:08:04.535938  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-admin: (903.048µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.537984  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.44107ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.538222  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-admin
I1109 01:08:04.541407  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-edit: (2.996197ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.543686  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.859617ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.543962  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-edit
I1109 01:08:04.545216  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:aggregate-to-view: (1.037557ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.548837  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.679719ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.549047  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:aggregate-to-view
I1109 01:08:04.551172  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:heapster: (1.862365ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.553511  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.860407ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.553695  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:heapster
I1109 01:08:04.560266  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node: (6.362994ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.564874  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.189536ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.565198  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node
I1109 01:08:04.568440  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-problem-detector: (3.023156ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.571572  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.766909ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.571983  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-problem-detector
I1109 01:08:04.575076  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kubelet-api-admin: (2.916799ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.577067  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.645893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.577367  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kubelet-api-admin
I1109 01:08:04.578477  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-bootstrapper: (828.331µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.581829  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.581852  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.581885  109746 httplog.go:90] GET /healthz: (977.301µs) 0 [Go-http-client/1.1 127.0.0.1:43498]
I1109 01:08:04.592660  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (13.78893ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.592883  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-bootstrapper
I1109 01:08:04.594053  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:auth-delegator: (973.652µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.598907  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.530492ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.599098  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:auth-delegator
I1109 01:08:04.600564  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-aggregator: (1.276223ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.601005  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.601033  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.601063  109746 httplog.go:90] GET /healthz: (892.369µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43498]
I1109 01:08:04.604083  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.123523ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.604331  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-aggregator
I1109 01:08:04.608017  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-controller-manager: (3.484592ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.610884  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.144942ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.611566  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-controller-manager
I1109 01:08:04.613062  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-dns: (1.035589ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.619470  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.721949ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.619732  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-dns
I1109 01:08:04.622354  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:persistent-volume-provisioner: (2.416632ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.625134  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.061526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.625523  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:persistent-volume-provisioner
I1109 01:08:04.629571  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:nodeclient: (3.874919ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.632771  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.311728ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.633095  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:nodeclient
I1109 01:08:04.635758  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient: (2.465578ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.643281  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (7.040554ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.643692  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
I1109 01:08:04.645119  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:volume-scheduler: (1.184893ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.648384  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.846521ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.648570  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:volume-scheduler
I1109 01:08:04.649870  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:node-proxier: (1.125605ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.654216  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.535682ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.654424  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:node-proxier
I1109 01:08:04.656124  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:kube-scheduler: (1.528834ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.662080  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.452584ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.662814  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:kube-scheduler
I1109 01:08:04.664387  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:attachdetach-controller: (1.388017ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.668508  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.606431ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.669201  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I1109 01:08:04.670592  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:clusterrole-aggregation-controller: (1.112455ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.674952  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.925857ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.675168  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I1109 01:08:04.692550  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:cronjob-controller: (16.885692ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.727826  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (34.769786ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.728168  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:cronjob-controller
I1109 01:08:04.730403  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.730437  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.730480  109746 httplog.go:90] GET /healthz: (28.575155ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:04.733484  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.733507  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.733564  109746 httplog.go:90] GET /healthz: (50.821209ms) 0 [Go-http-client/1.1 127.0.0.1:43498]
I1109 01:08:04.733601  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:daemon-set-controller: (5.119801ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.738238  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.723825ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.738456  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I1109 01:08:04.740526  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:deployment-controller: (1.76475ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.758759  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (17.476805ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.759068  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:deployment-controller
I1109 01:08:04.761004  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:disruption-controller: (1.433134ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.775688  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (13.142002ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.776120  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:disruption-controller
I1109 01:08:04.778491  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:endpoint-controller: (2.058778ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.781100  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.142041ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.781334  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:endpoint-controller
I1109 01:08:04.782864  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.782894  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.782931  109746 httplog.go:90] GET /healthz: (1.382792ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:04.783361  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:expand-controller: (1.842462ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.791667  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (7.86191ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.791972  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:expand-controller
I1109 01:08:04.793682  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:generic-garbage-collector: (1.423529ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.797104  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.964581ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.797412  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I1109 01:08:04.798955  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:horizontal-pod-autoscaler: (1.316305ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.802102  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.63807ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.802373  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I1109 01:08:04.811227  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:job-controller: (8.635746ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.811402  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.811430  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.811474  109746 httplog.go:90] GET /healthz: (11.166687ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:04.819518  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (7.804095ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.819871  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:job-controller
I1109 01:08:04.821710  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:namespace-controller: (1.571027ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.825150  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.960943ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.826483  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:namespace-controller
I1109 01:08:04.827858  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:node-controller: (1.109475ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.830169  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.833167ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.830540  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:node-controller
I1109 01:08:04.831701  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:persistent-volume-binder: (951.947µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.834126  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.047645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.834382  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I1109 01:08:04.835943  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pod-garbage-collector: (1.341071ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.838314  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.965145ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.838527  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I1109 01:08:04.841564  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replicaset-controller: (2.853321ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.847476  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (5.281099ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.848016  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replicaset-controller
I1109 01:08:04.850124  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:replication-controller: (1.838913ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.853300  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.71162ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.853528  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:replication-controller
I1109 01:08:04.859392  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:resourcequota-controller: (5.641174ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.863911  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (4.005306ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.864170  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I1109 01:08:04.865526  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:route-controller: (1.097857ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.876884  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (10.906249ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.879296  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:route-controller
I1109 01:08:04.880649  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-account-controller: (1.145108ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.883338  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.883465  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.883506  109746 httplog.go:90] GET /healthz: (1.866825ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:04.884832  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.768472ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.885027  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-account-controller
I1109 01:08:04.888939  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:service-controller: (3.596964ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.892402  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (3.085013ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.892701  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:service-controller
I1109 01:08:04.893810  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:statefulset-controller: (872.321µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.896044  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.872738ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.896320  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:statefulset-controller
I1109 01:08:04.897679  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:ttl-controller: (1.105758ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.899996  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.756708ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.900232  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:ttl-controller
I1109 01:08:04.901463  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:certificate-controller: (1.063571ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.901528  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.901551  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.901914  109746 httplog.go:90] GET /healthz: (1.778717ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:04.903357  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.421319ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.903607  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:certificate-controller
I1109 01:08:04.905265  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pvc-protection-controller: (1.445858ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.907547  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (1.825611ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.907767  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I1109 01:08:04.909072  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/system:controller:pv-protection-controller: (1.082598ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.911455  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterroles: (2.00966ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.911680  109746 storage_rbac.go:219] created clusterrole.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I1109 01:08:04.913064  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/cluster-admin: (1.116718ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.915410  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.864063ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.915679  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/cluster-admin
I1109 01:08:04.919633  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:discovery: (3.712235ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.922110  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.083104ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.922301  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:discovery
I1109 01:08:04.923492  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:basic-user: (1.009526ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.925987  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.061321ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.926167  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:basic-user
I1109 01:08:04.927545  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:public-info-viewer: (1.183906ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.933645  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (5.70207ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.933873  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:public-info-viewer
I1109 01:08:04.935067  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node-proxier: (912.956µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.936905  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.460616ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.937362  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node-proxier
I1109 01:08:04.938315  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-controller-manager: (801.752µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.942131  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.140709ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.942405  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-controller-manager
I1109 01:08:04.968423  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-dns: (8.343261ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.982066  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.025207ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:04.982161  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:04.982204  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:04.982234  109746 httplog.go:90] GET /healthz: (1.802959ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:04.982290  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-dns
I1109 01:08:05.001671  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.001702  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.001740  109746 httplog.go:90] GET /healthz: (1.174544ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.001829  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:kube-scheduler: (1.756144ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.023216  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.095205ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.023462  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:kube-scheduler
I1109 01:08:05.041580  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:volume-scheduler: (1.400438ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.063346  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.950235ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.063616  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:volume-scheduler
I1109 01:08:05.083134  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:node: (3.088354ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.083218  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.083263  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.083324  109746 httplog.go:90] GET /healthz: (2.812261ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.101255  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.101292  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.101360  109746 httplog.go:90] GET /healthz: (1.3079ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.103700  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.639549ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.103985  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:node
I1109 01:08:05.121833  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:attachdetach-controller: (1.72122ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.143055  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.921057ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.143379  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:attachdetach-controller
I1109 01:08:05.161533  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:clusterrole-aggregation-controller: (1.468453ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.182745  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.195319ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.183285  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:clusterrole-aggregation-controller
I1109 01:08:05.186538  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.186562  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.186602  109746 httplog.go:90] GET /healthz: (2.516277ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:05.201437  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.201461  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.201488  109746 httplog.go:90] GET /healthz: (1.447392ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.202797  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:cronjob-controller: (1.80276ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.223586  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.448585ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.223850  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:cronjob-controller
I1109 01:08:05.241603  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:daemon-set-controller: (1.513915ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.262742  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.610979ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.263017  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:daemon-set-controller
I1109 01:08:05.281687  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.281723  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.281770  109746 httplog.go:90] GET /healthz: (906.272µs) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:05.282709  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:deployment-controller: (2.635005ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.301466  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.301495  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.301537  109746 httplog.go:90] GET /healthz: (1.464707ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.302388  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.320131ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.302625  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:deployment-controller
I1109 01:08:05.322784  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:disruption-controller: (2.404087ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.342697  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.595254ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.342987  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:disruption-controller
I1109 01:08:05.361597  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:endpoint-controller: (1.471606ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.382101  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.07462ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.382396  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:endpoint-controller
I1109 01:08:05.383919  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.383960  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.383997  109746 httplog.go:90] GET /healthz: (3.441166ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:05.402915  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.402961  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.403020  109746 httplog.go:90] GET /healthz: (2.452523ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.404069  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:expand-controller: (3.590672ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.424805  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.688482ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.425144  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:expand-controller
I1109 01:08:05.441466  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:generic-garbage-collector: (1.375329ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.467221  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (7.180755ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.467546  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:generic-garbage-collector
I1109 01:08:05.481593  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.481601  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:horizontal-pod-autoscaler: (1.542462ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.481629  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.481685  109746 httplog.go:90] GET /healthz: (1.214784ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.502602  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.574493ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.502895  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:horizontal-pod-autoscaler
I1109 01:08:05.502992  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.503010  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.503035  109746 httplog.go:90] GET /healthz: (1.317987ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.521680  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:job-controller: (1.562752ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.542618  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.582181ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.542859  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:job-controller
I1109 01:08:05.561331  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:namespace-controller: (1.238183ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.581549  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.581584  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.581644  109746 httplog.go:90] GET /healthz: (1.178631ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.582587  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.485576ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.582839  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:namespace-controller
I1109 01:08:05.602112  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:node-controller: (1.997186ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.602396  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.602443  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.602487  109746 httplog.go:90] GET /healthz: (1.737914ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.622856  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.738423ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.623226  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:node-controller
I1109 01:08:05.641998  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:persistent-volume-binder: (1.884674ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.662627  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.621696ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.662986  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:persistent-volume-binder
I1109 01:08:05.681611  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pod-garbage-collector: (1.537708ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.682530  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.682567  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.682612  109746 httplog.go:90] GET /healthz: (2.11134ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.701419  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.701454  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.701496  109746 httplog.go:90] GET /healthz: (1.424729ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.702573  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.461203ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.702955  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pod-garbage-collector
I1109 01:08:05.721302  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replicaset-controller: (1.2341ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.742428  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.260872ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.742654  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replicaset-controller
I1109 01:08:05.762927  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:replication-controller: (2.868397ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.782250  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.782284  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.782323  109746 httplog.go:90] GET /healthz: (1.81167ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.784452  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (4.27394ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.784695  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:replication-controller
I1109 01:08:05.801341  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.801384  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.801424  109746 httplog.go:90] GET /healthz: (1.387309ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.802322  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:resourcequota-controller: (2.231909ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.827603  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (7.555918ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.827834  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:resourcequota-controller
I1109 01:08:05.841571  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:route-controller: (1.515848ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.862395  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.331178ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.862613  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:route-controller
I1109 01:08:05.881227  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-account-controller: (1.177749ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.881267  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.881287  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.881317  109746 httplog.go:90] GET /healthz: (784.483µs) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:05.901840  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.901876  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.901925  109746 httplog.go:90] GET /healthz: (1.785166ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:05.902391  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.302923ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.902577  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-account-controller
I1109 01:08:05.921481  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:service-controller: (1.460852ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.942757  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.641682ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.943117  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:service-controller
I1109 01:08:05.961214  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:statefulset-controller: (1.147994ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.982375  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:05.982406  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:05.982445  109746 httplog.go:90] GET /healthz: (1.978963ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:05.983698  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (3.62567ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:05.984011  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:statefulset-controller
I1109 01:08:06.001769  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:ttl-controller: (1.592407ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.001779  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.001829  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.001873  109746 httplog.go:90] GET /healthz: (1.705488ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.022419  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.280689ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.022676  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:ttl-controller
I1109 01:08:06.041480  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:certificate-controller: (1.340833ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.062337  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.252446ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.062614  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:certificate-controller
I1109 01:08:06.081566  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.081806  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.082097  109746 httplog.go:90] GET /healthz: (1.435092ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:06.082950  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pvc-protection-controller: (2.917155ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.101205  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.101235  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.101271  109746 httplog.go:90] GET /healthz: (1.235675ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.102028  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (1.989784ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.102426  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pvc-protection-controller
I1109 01:08:06.121792  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:controller:pv-protection-controller: (1.641632ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.142300  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/clusterrolebindings: (2.254645ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.142574  109746 storage_rbac.go:247] created clusterrolebinding.rbac.authorization.k8s.io/system:controller:pv-protection-controller
I1109 01:08:06.161456  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/extension-apiserver-authentication-reader: (1.387599ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.163228  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.232982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.182014  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.182046  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.182083  109746 httplog.go:90] GET /healthz: (1.648419ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:06.182884  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.833749ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.183137  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/extension-apiserver-authentication-reader in kube-system
I1109 01:08:06.201381  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.201416  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.201457  109746 httplog.go:90] GET /healthz: (1.383417ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.201490  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:bootstrap-signer: (1.385313ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.204703  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (2.713122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.222384  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.217902ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.222715  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I1109 01:08:06.241833  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:cloud-provider: (1.737667ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.243542  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.2653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.262021  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (1.995161ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.262271  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I1109 01:08:06.281062  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:controller:token-cleaner: (1.020567ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.281208  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.281230  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.281264  109746 httplog.go:90] GET /healthz: (770.212µs) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:06.282635  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.119191ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.302373  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.302412  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.302471  109746 httplog.go:90] GET /healthz: (2.36883ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.304027  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (3.895294ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.304299  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I1109 01:08:06.322157  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-controller-manager: (1.987331ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.325567  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.655865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.345924  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (5.896554ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.346227  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I1109 01:08:06.361378  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system::leader-locking-kube-scheduler: (1.371217ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.362890  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.073377ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.381482  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.381509  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.381541  109746 httplog.go:90] GET /healthz: (1.093811ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:06.382085  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles: (2.075678ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.382396  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I1109 01:08:06.402093  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.402127  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.402169  109746 httplog.go:90] GET /healthz: (1.988474ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.402546  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles/system:controller:bootstrap-signer: (2.480454ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.404170  109746 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.205627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.422881  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles: (2.780752ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.423164  109746 storage_rbac.go:278] created role.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I1109 01:08:06.441499  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::extension-apiserver-authentication-reader: (1.452111ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.443336  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.394147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.462311  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.271277ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.462581  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::extension-apiserver-authentication-reader in kube-system
I1109 01:08:06.481564  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-controller-manager: (1.428197ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.481623  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.481656  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.481697  109746 httplog.go:90] GET /healthz: (1.188041ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:06.483215  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.196257ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.511483  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.511526  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.511575  109746 httplog.go:90] GET /healthz: (11.118454ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.511804  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (11.622435ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.512026  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-controller-manager in kube-system
I1109 01:08:06.524622  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system::leader-locking-kube-scheduler: (1.105256ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.526291  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.181326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.542338  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (2.222526ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.542603  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system::leader-locking-kube-scheduler in kube-system
I1109 01:08:06.561335  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:bootstrap-signer: (1.235264ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.563172  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.456531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.581481  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.581689  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.581878  109746 httplog.go:90] GET /healthz: (1.518653ms) 0 [Go-http-client/1.1 127.0.0.1:43544]
I1109 01:08:06.583716  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.737004ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.584054  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-system
I1109 01:08:06.600882  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.600915  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.600950  109746 httplog.go:90] GET /healthz: (804.076µs) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.603409  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:cloud-provider: (3.240278ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.605521  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.740236ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.624105  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.781675ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.624355  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I1109 01:08:06.641538  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:controller:token-cleaner: (1.448762ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.643627  109746 httplog.go:90] GET /api/v1/namespaces/kube-system: (1.716559ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.663651  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (3.653339ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.663868  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
I1109 01:08:06.681605  109746 httplog.go:90] GET /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings/system:controller:bootstrap-signer: (1.375093ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.682056  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.682091  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.682116  109746 httplog.go:90] GET /healthz: (1.150986ms) 0 [Go-http-client/1.1 127.0.0.1:43188]
I1109 01:08:06.683086  109746 httplog.go:90] GET /api/v1/namespaces/kube-public: (1.099241ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.703551  109746 healthz.go:177] healthz check poststarthook/rbac/bootstrap-roles failed: not finished
I1109 01:08:06.703587  109746 healthz.go:191] [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/start-cluster-authentication-info-controller ok
healthz check failed
I1109 01:08:06.703665  109746 httplog.go:90] GET /healthz: (3.646095ms) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.703988  109746 httplog.go:90] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (3.968491ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.704301  109746 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:bootstrap-signer in kube-public
I1109 01:08:06.782310  109746 httplog.go:90] GET /healthz: (1.73641ms) 200 [Go-http-client/1.1 127.0.0.1:43544]
W1109 01:08:06.783545  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783573  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783610  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783678  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783691  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783705  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783720  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783733  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783751  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783766  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
W1109 01:08:06.783779  109746 mutation_detector.go:50] Mutation detector is enabled, this will result in memory leakage.
I1109 01:08:06.783794  109746 factory.go:300] Creating scheduler from algorithm provider 'DefaultProvider'
I1109 01:08:06.783805  109746 factory.go:392] Creating scheduler with fit predicates 'map[CheckNodeUnschedulable:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
I1109 01:08:06.784604  109746 reflector.go:153] Starting reflector *v1.PersistentVolumeClaim (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.784626  109746 reflector.go:188] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.784737  109746 reflector.go:153] Starting reflector *v1.PersistentVolume (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.784763  109746 reflector.go:188] Listing and watching *v1.PersistentVolume from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.784985  109746 reflector.go:153] Starting reflector *v1beta1.PodDisruptionBudget (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.784999  109746 reflector.go:188] Listing and watching *v1beta1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785153  109746 reflector.go:153] Starting reflector *v1beta1.CSINode (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785163  109746 reflector.go:188] Listing and watching *v1beta1.CSINode from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785456  109746 reflector.go:153] Starting reflector *v1.Service (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785472  109746 reflector.go:188] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785611  109746 reflector.go:153] Starting reflector *v1.ReplicaSet (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785625  109746 reflector.go:188] Listing and watching *v1.ReplicaSet from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785649  109746 reflector.go:153] Starting reflector *v1.ReplicationController (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785660  109746 reflector.go:188] Listing and watching *v1.ReplicationController from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785871  109746 reflector.go:153] Starting reflector *v1.Pod (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.785888  109746 reflector.go:188] Listing and watching *v1.Pod from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786027  109746 reflector.go:153] Starting reflector *v1.StatefulSet (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786041  109746 reflector.go:188] Listing and watching *v1.StatefulSet from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786303  109746 reflector.go:153] Starting reflector *v1.StorageClass (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786317  109746 reflector.go:188] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786368  109746 reflector.go:153] Starting reflector *v1.Node (1s) from k8s.io/client-go/informers/factory.go:135
I1109 01:08:06.786382  109746 reflector.go:188] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:135
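The block above starts eleven reflectors, one per resource the scheduler watches, each with the 1s resync period shown in parentheses; the LIST and WATCH requests they issue appear immediately below, and the periodic "forcing resync" lines later in the log are that same 1s period firing. (The repeated "Mutation detector is enabled" warnings a few lines earlier come from client-go's cache mutation detector, which is typically switched on for test runs via the KUBE_CACHE_MUTATION_DETECTOR environment variable.) A rough sketch of equivalent client-go informer wiring; the scheduler's real setup goes through its own factory code, so this only illustrates the resync period and the cache sync that the later "caches populated" lines correspond to.

```go
package informersketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startInformers builds a shared informer factory with the 1s resync period
// seen in the "Starting reflector ... (1s)" lines and waits for the initial
// LISTs to complete, which is what the "caches populated" messages report.
func startInformers(cs kubernetes.Interface, stopCh <-chan struct{}) bool {
	factory := informers.NewSharedInformerFactory(cs, 1*time.Second)

	podInformer := factory.Core().V1().Pods().Informer()
	nodeInformer := factory.Core().V1().Nodes().Informer()

	factory.Start(stopCh)
	return cache.WaitForCacheSync(stopCh, podInformer.HasSynced, nodeInformer.HasSynced)
}
```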
I1109 01:08:06.788157  109746 httplog.go:90] GET /api/v1/nodes?limit=500&resourceVersion=0: (378.145µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44204]
I1109 01:08:06.788157  109746 httplog.go:90] GET /api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: (599.897µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:06.788528  109746 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?limit=500&resourceVersion=0: (275.283µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44190]
I1109 01:08:06.788686  109746 httplog.go:90] GET /api/v1/replicationcontrollers?limit=500&resourceVersion=0: (415.647µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44196]
I1109 01:08:06.788704  109746 httplog.go:90] GET /api/v1/pods?limit=500&resourceVersion=0: (349.833µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44198]
I1109 01:08:06.788966  109746 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: (338.811µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44188]
I1109 01:08:06.789067  109746 httplog.go:90] GET /apis/apps/v1/statefulsets?limit=500&resourceVersion=0: (273.835µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44200]
I1109 01:08:06.789435  109746 httplog.go:90] GET /api/v1/services?limit=500&resourceVersion=0: (350.636µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44192]
I1109 01:08:06.789437  109746 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: (277.885µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44202]
I1109 01:08:06.789667  109746 get.go:251] Starting watch for /api/v1/persistentvolumeclaims, rv=32259 labels= fields= timeout=9m21s
I1109 01:08:06.788158  109746 httplog.go:90] GET /api/v1/persistentvolumes?limit=500&resourceVersion=0: (596.09µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43188]
I1109 01:08:06.789919  109746 get.go:251] Starting watch for /apis/storage.k8s.io/v1beta1/csinodes, rv=32261 labels= fields= timeout=8m54s
I1109 01:08:06.790001  109746 httplog.go:90] GET /apis/apps/v1/replicasets?limit=500&resourceVersion=0: (396.298µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44194]
I1109 01:08:06.789667  109746 get.go:251] Starting watch for /api/v1/nodes, rv=32259 labels= fields= timeout=5m9s
I1109 01:08:06.790518  109746 get.go:251] Starting watch for /apis/policy/v1beta1/poddisruptionbudgets, rv=32261 labels= fields= timeout=7m12s
I1109 01:08:06.790872  109746 get.go:251] Starting watch for /apis/apps/v1/statefulsets, rv=32261 labels= fields= timeout=6m53s
I1109 01:08:06.791405  109746 get.go:251] Starting watch for /api/v1/persistentvolumes, rv=32259 labels= fields= timeout=9m58s
I1109 01:08:06.791786  109746 get.go:251] Starting watch for /api/v1/services, rv=32259 labels= fields= timeout=8m15s
I1109 01:08:06.791804  109746 get.go:251] Starting watch for /apis/storage.k8s.io/v1/storageclasses, rv=32261 labels= fields= timeout=6m13s
I1109 01:08:06.791515  109746 get.go:251] Starting watch for /api/v1/replicationcontrollers, rv=32259 labels= fields= timeout=9m11s
I1109 01:08:06.792336  109746 get.go:251] Starting watch for /apis/apps/v1/replicasets, rv=32261 labels= fields= timeout=7m8s
I1109 01:08:06.792350  109746 get.go:251] Starting watch for /api/v1/pods, rv=32259 labels= fields= timeout=8m27s
I1109 01:08:06.801761  109746 httplog.go:90] GET /healthz: (1.405305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.803209  109746 httplog.go:90] GET /api/v1/namespaces/default: (1.080691ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.805990  109746 httplog.go:90] POST /api/v1/namespaces: (2.274008ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.807680  109746 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.211597ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.812234  109746 httplog.go:90] POST /api/v1/namespaces/default/services: (3.960042ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.815956  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (3.274403ms) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.818534  109746 httplog.go:90] POST /api/v1/namespaces/default/endpoints: (2.092508ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.884587  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884665  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884676  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884684  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884691  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884699  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884707  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884714  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884729  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884737  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884741  109746 shared_informer.go:227] caches populated
I1109 01:08:06.884874  109746 shared_informer.go:227] caches populated
I1109 01:08:06.888496  109746 node_tree.go:86] Added node "node1" in group "" to NodeTree
I1109 01:08:06.889013  109746 httplog.go:90] POST /api/v1/nodes: (3.661412ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.892804  109746 httplog.go:90] PATCH /api/v1/nodes/node1: (3.278151ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
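Here the test registers its single node: the POST creates node1, the scheduler's cache adds it to the NodeTree, and the PATCH that follows most likely fills in the node's status (capacity, conditions). A sketch of a minimal node object for such a test; the capacity figures are invented, and whether status is populated on create or via a follow-up status update (the PATCH in the log) is glossed over here.

```go
package nodesketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createTestNode registers a single schedulable node, roughly what the
// POST /api/v1/nodes above does. Pre-1.18 client-go signature (no context).
func createTestNode(cs kubernetes.Interface) (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node1"},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("500Mi"),
				v1.ResourcePods:   resource.MustParse("32"),
			},
		},
	}
	return cs.CoreV1().Nodes().Create(node)
}
```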
I1109 01:08:06.995909  109746 httplog.go:90] GET /api/v1/nodes/node1: (2.008038ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.998550  109746 httplog.go:90] POST /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods: (2.032664ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:06.999382  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:06.999399  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:06.999568  109746 scheduler_binder.go:257] AssumePodVolumes for pod "preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod", node "node1"
I1109 01:08:06.999591  109746 scheduler_binder.go:267] AssumePodVolumes for pod "preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod", node "node1": all PVCs bound and nothing to do
I1109 01:08:06.999665  109746 factory.go:698] Attempting to bind victim-pod to node1
I1109 01:08:07.002674  109746 httplog.go:90] POST /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod/binding: (2.720837ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.002834  109746 scheduler.go:756] pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod is bound successfully on node "node1", 1 nodes evaluated, 1 nodes were found feasible.
I1109 01:08:07.006297  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (3.206372ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
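This is the happy path for victim-pod: volumes are assumed (all PVCs bound, nothing to do), the scheduler picks node1, POSTs a binding, and records an event. Stripped of the scheduler machinery, the bind step comes down to a single subresource call; a hedged sketch, again with pre-1.18 client-go signatures.

```go
package bindsketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bindPod issues the same POST .../pods/<name>/binding call seen above,
// assigning the pod to a node.
func bindPod(cs kubernetes.Interface, namespace, podName, nodeName string) error {
	binding := &v1.Binding{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: podName},
		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
	}
	return cs.CoreV1().Pods(namespace).Bind(binding)
}
```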
I1109 01:08:07.101553  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.150502ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.104640  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.634794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.107736  109746 httplog.go:90] POST /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods: (2.556716ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.107866  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:07.107888  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:07.107995  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu.; waiting
I1109 01:08:07.108034  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:07.110154  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (1.481694ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:07.110961  109746 httplog.go:90] PUT /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod/status: (2.664863ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.112827  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (2.939258ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
I1109 01:08:07.113698  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (2.337331ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44208]
I1109 01:08:07.113951  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:07.116335  109746 httplog.go:90] PUT /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod/status: (1.935013ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
I1109 01:08:07.119730  109746 httplog.go:90] DELETE /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.982133ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
I1109 01:08:07.122498  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (2.123547ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
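Between the victim's binding and this DELETE, the test creates preemptor-pod, the scheduler finds it unschedulable ("0/1 nodes are available: 1 Insufficient cpu."), marks node1 as a potential preemption target, and deletes victim-pod to make room. The actual pod shapes are not visible in the log; the sketch below shows one plausible pair, with made-up priorities and CPU requests sized so that each pod fits alone but both do not fit together (the real test may set priority through a PriorityClass and admission rather than spec.priority directly).

```go
package preemptionsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
	lowPriority  int32 = 100  // victim
	highPriority int32 = 1000 // preemptor
)

// testPod builds a single-container pod with the given priority and CPU request.
func testPod(name string, priority *int32, cpu string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Priority: priority,
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause:3.1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse(cpu),
					},
				},
			}},
		},
	}
}

// On a node with roughly 500m allocatable CPU, the victim fits alone but the
// higher-priority preemptor no longer fits alongside it, so the scheduler
// preempts (deletes) the victim, as in the DELETE call above.
var (
	victimPod    = testPod("victim-pod", &lowPriority, "400m")
	preemptorPod = testPod("preemptor-pod", &highPriority, "300m")
)
```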
I1109 01:08:07.789227  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.789404  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.789595  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.791129  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.791385  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.791709  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.792117  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:07.792313  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:07.792338  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:07.792499  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod: no fit: 0/1 nodes are available: 1 Insufficient cpu.; waiting
I1109 01:08:07.792549  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:07.794784  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (1.882086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
I1109 01:08:07.795258  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (2.285702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:07.795264  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (1.89968ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44334]
I1109 01:08:08.110304  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.782945ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:08.213458  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (2.133506ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:08.221225  109746 httplog.go:90] DELETE /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (7.154459ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:08.221781  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:08.221814  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:08.222141  109746 scheduler_binder.go:257] AssumePodVolumes for pod "preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod", node "node1"
I1109 01:08:08.222213  109746 scheduler_binder.go:267] AssumePodVolumes for pod "preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod", node "node1": all PVCs bound and nothing to do
I1109 01:08:08.222412  109746 factory.go:698] Attempting to bind preemptor-pod to node1
I1109 01:08:08.224914  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:08.224961  109746 scheduler.go:607] Skip schedule deleting pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:08.225098  109746 store.go:365] GuaranteedUpdate of /18091606-1f4c-4ba4-beeb-db514c627f25/pods/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod failed because of a conflict, going to retry
I1109 01:08:08.225860  109746 httplog.go:90] POST /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod/binding: (3.093634ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
I1109 01:08:08.226157  109746 scheduler.go:580] Failed to bind pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
E1109 01:08:08.226283  109746 factory.go:648] Error scheduling preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod: Operation cannot be fulfilled on pods/binding "preemptor-pod": pod preemptor-pod is being deleted, cannot be assigned to a host; retrying
I1109 01:08:08.226355  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod to (PodScheduled==False, Reason=SchedulerError)
I1109 01:08:08.228536  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (2.797639ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:08.228739  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (1.949011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44382]
I1109 01:08:08.229044  109746 httplog.go:90] DELETE /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (7.377532ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44248]
I1109 01:08:08.229763  109746 httplog.go:90] PUT /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod/status: (1.969138ms) 409 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44250]
E1109 01:08:08.230027  109746 scheduler.go:446] Error updating the condition of the pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod: Operation cannot be fulfilled on pods "preemptor-pod": StorageError: invalid object, Code: 4, Key: /18091606-1f4c-4ba4-beeb-db514c627f25/pods/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c19a9c3d-5f38-4f23-8fc3-9e106c6dab9e, UID in object meta: 
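Here the test has deleted preemptor-pod while the scheduler was still binding it: the binding POST returns 409 because the pod is being deleted, and the follow-up status PUT fails with a UID precondition error because the object is already gone. Those errors are expected in this scenario and the scheduler simply moves on. For the ordinary 409 conflict case (the object still exists but was modified concurrently), the usual client-go pattern is to re-read and retry; a sketch with retry.RetryOnConflict, using pre-1.18 signatures and a hypothetical helper name.

```go
package conflictsketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// markUnschedulable re-reads the pod on each attempt and retries when the
// write fails with a 409 conflict. For brevity it appends a new
// PodScheduled=False condition; a real updater would modify an existing
// condition in place instead of appending.
func markUnschedulable(cs kubernetes.Interface, ns, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
			Type:   v1.PodScheduled,
			Status: v1.ConditionFalse,
			Reason: "Unschedulable",
		})
		_, err = cs.CoreV1().Pods(ns).UpdateStatus(pod)
		return err
	})
}
```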
I1109 01:08:08.231037  109746 httplog.go:90] PATCH /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events/preemptor-pod.15d5592b679b160c: (2.773314ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44384]
I1109 01:08:08.231608  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (971.814µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44382]
I1109 01:08:08.233980  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/preemptor-pod: (854.067µs) 404 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44384]
I1109 01:08:08.236368  109746 httplog.go:90] POST /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods: (1.92358ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44384]
I1109 01:08:08.236725  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.236804  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.237039  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:08.237132  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:08.239311  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (1.81517ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:08.240333  109746 httplog.go:90] PUT /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod/status: (2.881827ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44384]
I1109 01:08:08.240783  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.755121ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
E1109 01:08:08.240994  109746 factory.go:673] pod is already present in the activeQ
I1109 01:08:08.241914  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.177498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44384]
I1109 01:08:08.242131  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:08.242416  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.242436  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.242509  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:08.242536  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:08.243972  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (991.038µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:08.244383  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.667987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.244643  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:08.244983  109746 httplog.go:90] POST /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events: (1.915138ms) 201 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44390]
I1109 01:08:08.339445  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.193895ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.440090  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.797228ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.538950  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.930686ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.641611  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.944458ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.739301  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.12019ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
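The steady GET .../pods/victim-pod requests arriving roughly every 100ms are the test polling the victim's state while the scheduler keeps reporting "can't fit victim-pod". A sketch of such a wait loop with wait.Poll; the 100ms interval matches the request cadence in the log, while the timeout and the exact condition being checked are guesses.

```go
package pollsketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodUnschedulable polls the pod every 100ms until it carries a
// PodScheduled=False/Unschedulable condition or the timeout expires.
func waitForPodUnschedulable(cs kubernetes.Interface, ns, name string) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not created yet; keep polling
		}
		if err != nil {
			return false, err
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse && c.Reason == "Unschedulable" {
				return true, nil
			}
		}
		return false, nil
	})
}
```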
I1109 01:08:08.789447  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.789531  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.789767  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.791256  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.791564  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.791884  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.792253  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:08.792338  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.792357  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:08.792545  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:08.792599  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:08.795065  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.785188ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:08.795065  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.128473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:08.795992  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:08.799956  109746 httplog.go:90] PATCH /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events/victim-pod.15d5592b826d5cb5: (5.082092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44524]
I1109 01:08:08.838687  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.602535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:08.945793  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (8.316369ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.039060  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.943625ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.139062  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.977778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.238914  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.732473ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.338999  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.913544ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.439040  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.859669ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.539241  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.114022ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.639097  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.815466ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.738932  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.793302ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.784885  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:09.784959  109746 scheduler.go:607] Skip schedule deleting pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/preemptor-pod
I1109 01:08:09.788250  109746 httplog.go:90] PATCH /apis/events.k8s.io/v1beta1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/events/preemptor-pod.15d5592b81610e1a: (2.5734ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.789579  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.789673  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.789932  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.791443  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.791737  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.791830  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:09.791849  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:09.791992  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.792010  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:09.792048  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:09.792431  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:09.793744  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.407245ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.793763  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.411335ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:09.794090  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
E1109 01:08:09.794090  109746 factory.go:673] pod is already present in the activeQ
I1109 01:08:09.794353  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:09.794365  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:09.794477  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:09.794536  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:09.795847  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.057319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:09.795853  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (997.343µs) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.796086  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:09.838570  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.500113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:09.938718  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.580126ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.040552  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.29933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.138863  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.728364ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.238910  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.803627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.338837  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.750362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.439787  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.554412ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.538907  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.75086ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.639225  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.194582ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.738780  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.678957ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.789760  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.789814  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.790108  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.791601  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.791938  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.792134  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.792639  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:10.792740  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:10.792750  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:10.792900  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:10.792934  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:10.795137  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.650233ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:10.795485  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:10.795605  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.333847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:10.849809  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (10.320158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:10.939097  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.982776ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.039080  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.923604ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.140492  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.398209ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.238702  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.627556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.338925  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.644702ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.438942  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.799496ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.539638  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.614876ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.639481  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.243207ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.738962  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.861542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.789990  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.789998  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.790649  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.791782  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.792117  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.792302  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.792809  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:11.792899  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:11.792936  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:11.793133  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:11.793213  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:11.795296  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.578946ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:11.795399  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.879267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.795646  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:11.838998  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.920656ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:11.938654  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.573653ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.038958  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.826881ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.138706  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.579101ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.239097  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.936587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.338880  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.749328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.439034  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.95892ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.538921  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.820417ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.642572  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.416569ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.741549  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.4477ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.790137  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.790220  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.790879  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.791995  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.792363  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.792502  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:12.792522  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:12.792702  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.792817  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:12.792887  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:12.793151  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:12.795269  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.800355ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
E1109 01:08:12.795605  109746 factory.go:673] pod is already present in the activeQ
I1109 01:08:12.795894  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.945608ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.796237  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:12.796715  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:12.796733  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:12.796839  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:12.796872  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:12.798611  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.424285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:12.798626  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.547304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.798890  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:12.838838  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.730989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:12.939039  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.912363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.039058  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.903436ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.138997  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.932929ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.238876  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.778383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.338823  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.589631ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.438928  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.773285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.538873  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.75994ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.638997  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.823536ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.739264  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.918602ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.790283  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.790392  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.791057  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.792163  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.792872  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.793327  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.793386  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:13.839239  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.842516ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:13.938858  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.662765ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.039408  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.272442ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.139666  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.50085ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.239110  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.02789ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.338943  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.878981ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.438974  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.876801ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.538761  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.67933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.639031  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.891705ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.738875  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.651787ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.790512  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.790592  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.791377  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.792369  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.793043  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.793450  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:14.793508  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
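[editor's note] The "forcing resync" bursts above repeat roughly once per second across seven reflectors, which is consistent with a shared informer factory configured with a very short resync period; the exact period and client wiring used by the test are not visible in this log, so the sketch below is an assumption. It shows the standard client-go pattern that produces one reflector (and hence one resync log line) per requested informer; the host string is a placeholder and the code needs a reachable apiserver to sync.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Placeholder config; the integration test builds its client differently.
	cfg := &rest.Config{Host: "http://127.0.0.1:8080"}
	client := kubernetes.NewForConfigOrDie(cfg)

	// A 1s resync is inferred from the once-per-second cadence in this log,
	// not read from the test's source.
	factory := informers.NewSharedInformerFactory(client, 1*time.Second)
	factory.Core().V1().Pods().Informer() // request one informer -> one reflector

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)             // each started reflector logs its own resync
	factory.WaitForCacheSync(stop)  // blocks until the initial List/Watch completes
}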
I1109 01:08:14.794540  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:14.794561  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:14.794744  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:14.794793  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:14.796904  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.79847ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:14.797028  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.880122ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:14.797219  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:14.838905  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.802989ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:14.938923  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.787527ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.039000  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.839198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.138849  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.756782ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.238972  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.868195ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.338642  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.557322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.438677  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.62488ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.538988  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.879015ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.639148  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.093225ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.739239  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.094157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.790698  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.790702  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.791755  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.792540  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.793265  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.793879  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.793868  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:15.839405  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.188097ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:15.939346  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.102753ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.039641  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.405438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.139352  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.175268ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.238891  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.732269ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.338999  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.831931ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.439108  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.957081ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.544444  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (7.267304ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.639095  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.861675ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.739234  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.076273ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.790959  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.791015  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.791987  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.792871  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.793440  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.794141  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.794266  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:16.794300  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:16.794315  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:16.794534  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:16.794587  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:16.796622  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.550725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:16.797547  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.660325ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.798034  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:16.804559  109746 httplog.go:90] GET /api/v1/namespaces/default: (2.152657ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.807796  109746 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.484489ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.809841  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.583654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.841274  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.078716ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:16.944788  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (7.469636ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.039024  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.870383ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.139973  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.818027ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.239206  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.940634ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.339355  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.928433ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.448388  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.50768ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.538763  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.694363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.640755  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.624267ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.752889  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (8.300011ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.786289  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:17.786325  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:17.786581  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:17.786631  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:17.791116  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.791164  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.792103  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.793029  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.793614  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.794342  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.794514  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:17.802902  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (12.565498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:17.803204  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (12.845343ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:17.803250  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:17.803501  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:17.803527  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:17.803648  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:17.803697  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:17.806375  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.361865ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
E1109 01:08:17.806647  109746 factory.go:673] pod is already present in the backoffQ
I1109 01:08:17.806891  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.954438ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:17.807106  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:17.841864  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.622553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:17.941214  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.907974ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.039235  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.979562ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.138974  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.80318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.243275  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (6.154739ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.342862  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.685794ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.438859  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.751397ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.543206  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.720853ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.646021  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (8.871047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.740061  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.992786ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.791251  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.791299  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.792273  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.793199  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.793750  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.794533  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.794614  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:18.794772  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:18.794789  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:18.795025  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:18.795134  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:18.798231  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.66108ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.798569  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.86872ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:18.798805  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:18.843375  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (6.26154ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:18.939077  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.922271ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.038813  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.743911ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.139657  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.56363ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.239311  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.223483ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.345516  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (8.368208ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.439990  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.873407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.548682  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (8.095429ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.639066  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.980742ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.739306  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.151032ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
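[editor's note] The steady stream of GET /pods/victim-pod requests at ~100ms intervals looks like the test harness polling the pod's status while it waits for the expected scheduling outcome. The helper below is a hypothetical sketch of such a poll loop, not the actual helper in test/integration/scheduler; names, the timeout, and the stop condition are assumptions, and the context-taking Get signature assumes a client-go release from 0.18 onward.

package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForUnschedulable polls the pod every 100ms (the cadence of the GET
// requests in this log) until its PodScheduled condition reports
// Reason=Unschedulable or the timeout expires.
func waitForUnschedulable(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(100*time.Millisecond, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodScheduled &&
				c.Status == corev1.ConditionFalse &&
				c.Reason == "Unschedulable" {
				return true, nil
			}
		}
		return false, nil
	})
}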
I1109 01:08:19.791449  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.791504  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.792457  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.793345  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.793911  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.794766  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.794782  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:19.794898  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:19.794911  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:19.795095  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:19.795160  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:19.798750  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.211751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:19.802703  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (7.241093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.803225  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:19.839024  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.873162ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:19.939317  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.147987ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.039311  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.138948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.138907  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.77186ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.239320  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.812804ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.343274  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.486147ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.438765  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.508285ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.541525  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.045943ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.638826  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.716498ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.738797  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.666767ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.791672  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.791702  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.792682  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.793644  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.794081  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.794918  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.794956  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:20.838848  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.766129ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:20.939664  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.510047ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.038995  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.723737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.141429  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.24412ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.239007  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.845157ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.341914  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.73813ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.446320  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (9.119443ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.542517  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.330132ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.639382  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.111464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.741325  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.142158ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.791973  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.791975  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.792847  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.793887  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.794290  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.795102  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.795116  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:21.795237  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:21.795258  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:21.795450  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:21.795494  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:21.798775  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.950531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:21.798820  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.846672ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:21.799115  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:21.839593  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.373869ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:21.939875  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.601201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.039314  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.222661ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.139116  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.975622ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.240711  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.539359ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.339062  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.772962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.439063  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.899614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.542109  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.866815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.639451  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.299169ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.739501  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.273376ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.792122  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.792168  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.793056  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.794121  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.794513  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.795256  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.795275  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:22.795363  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:22.795375  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:22.795563  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:22.795621  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:22.798325  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.187201ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:22.798331  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.27967ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.799078  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:22.839682  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.40093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:22.939480  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.207424ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.039757  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.549815ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.139688  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.449262ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.239444  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.145322ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.339270  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.069541ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.439551  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.386405ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.539326  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.108071ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.639564  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.238645ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.740147  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.846807ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.792377  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.792407  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.793262  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.794355  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.794695  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.795469  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.795432  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:23.839968  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.354328ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:23.939500  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.198793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.040215  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.824986ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.142697  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.289627ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.239794  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.496303ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.339286  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.157505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.439266  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.123845ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.539490  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.325441ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.641488  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.302531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.739999  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.757748ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.792615  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.792650  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.793521  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.794558  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.794867  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.795676  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.795828  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:24.795885  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:24.795848  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:24.796334  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:24.796390  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:24.798782  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.059118ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:24.798888  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.330851ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:24.799214  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:24.839537  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.312754ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:24.939233  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.020695ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.038868  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.735594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.139339  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.174553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.239624  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.37294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.339210  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.029416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.439438  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.151112ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.539684  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.313039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.639286  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.094692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.740575  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.401037ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.792820  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.792876  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.793738  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.794689  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.795096  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.795972  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.796347  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:25.796409  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:25.796649  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:25.797128  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:25.797413  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:25.799656  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.839607ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:25.800004  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.251632ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.800237  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:25.839211  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.825334ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:25.939199  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.938198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.038867  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.679177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.138931  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.77868ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.238726  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.626386ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.339047  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.801083ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.438644  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.581109ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.538829  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.636327ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.639117  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.901317ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.738863  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.73593ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.791504  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:26.791535  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:26.791705  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:26.791742  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:26.792967  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.792997  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.793673  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.580418ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:26.793716  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.694092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:26.794113  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:26.794246  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.794874  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.795341  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.796288  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.796612  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:26.804698  109746 httplog.go:90] GET /api/v1/namespaces/default: (2.164464ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:26.806899  109746 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.632242ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:26.808631  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.17793ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:26.838545  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.454916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:26.940278  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.148641ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.039378  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.238578ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.139299  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.190982ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.241321  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.161026ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.338989  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.650919ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.438877  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.74725ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.538757  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.74099ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.639215  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.925034ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.740430  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.309226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.793128  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.793197  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.795036  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.795323  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.795651  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.796486  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.796586  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:27.796600  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:27.796765  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:27.796789  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:27.796839  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:27.800161  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.799399ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:27.800537  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:27.801711  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.342315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:27.839370  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.148724ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:27.939691  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.572614ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.057041  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (16.314068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.162316  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (24.226837ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.238801  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.683896ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.339042  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.951457ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.438722  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.629031ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.539023  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.863829ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.639018  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.911537ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.738990  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.870105ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.793337  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.793337  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.795270  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.795499  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.796536  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.796664  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.796797  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:28.796819  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:28.796875  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:28.797031  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:28.797090  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:28.799283  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.862146ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:28.799297  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.855592ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:28.799624  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:28.839287  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.125281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:28.939789  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.838048ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.040821  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.684318ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.139676  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.540161ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.238781  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.670581ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.338803  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.62864ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.439879  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.703933ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.538948  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.791991ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.639804  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.575003ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.738998  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.857404ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.793515  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.793589  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.795473  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.795692  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.796759  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.796818  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.796928  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:29.796940  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:29.797164  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:29.797215  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:29.797228  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:29.799906  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.311553ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:29.800072  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.488531ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:29.800418  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:29.838958  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.829092ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:29.940217  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.053198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.039017  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.763556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.147269  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (10.170281ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.239080  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.030666ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.338891  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.793757ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.439079  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.800419ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.538948  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.805491ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.638939  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.72841ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.738700  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.579493ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.793721  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.793776  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.795686  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.795848  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.796915  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.796979  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.797076  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:30.797094  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:30.797291  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:30.797345  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:30.797406  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:30.799479  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.739395ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:30.799534  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.876008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:30.799786  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:30.839328  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.990002ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:30.938855  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.703294ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.038813  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.62454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.139248  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.916671ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.239615  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.503401ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.338592  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.517624ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.441538  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.370381ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.539913  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.808646ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.641856  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.729692ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.739864  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.692556ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.793919  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.793979  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.795897  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.796036  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.797110  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.797111  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.797295  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:31.797314  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:31.797500  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:31.797549  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:31.797588  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:31.800306  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.927198ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:31.800306  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.934456ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:31.800530  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:31.839338  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.171199ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:31.939590  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.079905ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.041727  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.522406ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.140051  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.889751ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.238501  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.467057ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.339194  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.060761ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.438836  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.697301ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.540006  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.887055ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.639224  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.787969ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.738804  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.682948ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.794144  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.794210  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.796024  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.796237  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.797254  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.797352  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:32.797362  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:32.797519  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:32.797555  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:32.797735  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.797903  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:32.799873  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.932467ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:32.800060  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.137171ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:32.800256  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:32.838714  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.606461ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:32.939293  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.016315ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.039117  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.813008ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.138863  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.673226ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.238638  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.488684ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.338903  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.784821ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.439116  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.011371ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.538902  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.691557ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.639132  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.945778ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.740083  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.720997ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.794308  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.794399  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.796221  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.796390  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.797404  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.797572  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:33.797590  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:33.797783  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:33.797840  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:33.797894  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.798085  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:33.799922  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.646662ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:33.800127  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.965642ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.800935  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:33.840678  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.525824ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:33.939221  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.928454ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.052348  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (15.2113ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.169924  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (32.347875ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.240297  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.055825ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.339417  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.293068ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.443004  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.962848ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.544431  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (7.373181ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.643429  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (6.068177ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.739295  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.106737ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.794640  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.796566  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.796611  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.796627  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.797574  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.797652  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:34.797666  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:34.797864  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:34.797915  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:34.798372  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.799349  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:34.802797  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.188962ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:34.803107  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:34.803503  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.903326ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:34.841521  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.436654ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:34.939394  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.877542ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.040929  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.800663ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.139079  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.013149ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.239316  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.043595ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.338973  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.779039ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.438862  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.684885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.539247  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.952898ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.639065  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.870448ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.739300  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.192643ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.794751  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.796707  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.796765  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.796782  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.797751  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.797881  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:35.797909  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:35.798093  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:35.798154  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:35.798532  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.799517  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:35.804235  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (4.429319ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:35.804362  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (5.741594ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.804709  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:35.848344  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (11.200351ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:35.939063  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.763858ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.038730  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.600828ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.139148  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.069486ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.240366  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (3.196916ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.339465  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.293224ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.438941  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.855554ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.539102  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.969283ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.639045  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.94153ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.739245  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.149907ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.795268  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.796900  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.797017  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.796925  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.799459  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.799507  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.799607  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:36.799620  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:36.799732  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:36.799906  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:36.799976  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:36.802388  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.010073ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:36.803609  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.623213ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:36.803938  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:36.804859  109746 httplog.go:90] GET /api/v1/namespaces/default: (1.323535ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:52876]
I1109 01:08:36.806450  109746 httplog.go:90] GET /api/v1/namespaces/default/services/kubernetes: (1.140505ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:36.808002  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.129087ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:36.839326  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.147219ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:36.939012  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.893855ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.039364  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.248247ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.139663  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.040885ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.239048  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.891887ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.339071  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.75093ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.439105  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.981752ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.539063  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.899264ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.639402  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.154305ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.739391  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.26587ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.795441  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.797157  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.799572  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.799620  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.799648  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.799757  109746 scheduling_queue.go:841] About to try and schedule pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:37.799781  109746 scheduler.go:611] Attempting to schedule pod: preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod
I1109 01:08:37.799901  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.800001  109746 factory.go:632] Unable to schedule preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod: no fit: 0/1 nodes are available: 1 can't fit victim-pod.; waiting
I1109 01:08:37.800063  109746 scheduler.go:774] Updating pod condition for preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod to (PodScheduled==False, Reason=Unschedulable)
I1109 01:08:37.800697  109746 reflector.go:278] k8s.io/client-go/informers/factory.go:135: forcing resync
I1109 01:08:37.802575  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.103546ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44388]
I1109 01:08:37.802915  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.536391ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.803242  109746 generic_scheduler.go:1207] Node node1 is a potential node for preemption.
I1109 01:08:37.839314  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.09362ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:37.939041  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.889214ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.039106  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.947883ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.139727  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.573416ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.239630  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (2.502658ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.242023  109746 httplog.go:90] GET /api/v1/namespaces/preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/pods/victim-pod: (1.486519ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.242848  109746 httplog.go:90] GET /api/v1/nodes?allowWatchBookmarks=true&resourceVersion=32259&timeout=5m9s&timeoutSeconds=309&watch=true: (31.453339271s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44204]
I1109 01:08:38.242996  109746 httplog.go:90] GET /api/v1/pods?allowWatchBookmarks=true&resourceVersion=32259&timeout=8m27s&timeoutSeconds=507&watch=true: (31.450876007s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44206]
I1109 01:08:38.243217  109746 httplog.go:90] GET /api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=32259&timeout=9m21s&timeoutSeconds=561&watch=true: (31.453666536s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43544]
I1109 01:08:38.243237  109746 httplog.go:90] GET /apis/storage.k8s.io/v1beta1/csinodes?allowWatchBookmarks=true&resourceVersion=32261&timeout=8m54s&timeoutSeconds=534&watch=true: (31.453353633s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44190]
I1109 01:08:38.243244  109746 httplog.go:90] GET /api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=32259&timeout=9m58s&timeoutSeconds=598&watch=true: (31.452029856s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44188]
I1109 01:08:38.243256  109746 httplog.go:90] GET /apis/policy/v1beta1/poddisruptionbudgets?allowWatchBookmarks=true&resourceVersion=32261&timeout=7m12s&timeoutSeconds=432&watch=true: (31.452879773s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44198]
I1109 01:08:38.243377  109746 httplog.go:90] GET /apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=32261&timeout=6m13s&timeoutSeconds=373&watch=true: (31.451705898s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44192]
I1109 01:08:38.243406  109746 httplog.go:90] GET /api/v1/replicationcontrollers?allowWatchBookmarks=true&resourceVersion=32259&timeout=9m11s&timeoutSeconds=551&watch=true: (31.452084165s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44196]
I1109 01:08:38.243420  109746 httplog.go:90] GET /apis/apps/v1/replicasets?allowWatchBookmarks=true&resourceVersion=32261&timeout=7m8s&timeoutSeconds=428&watch=true: (31.451924673s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44194]
I1109 01:08:38.243376  109746 httplog.go:90] GET /apis/apps/v1/statefulsets?allowWatchBookmarks=true&resourceVersion=32261&timeout=6m53s&timeoutSeconds=413&watch=true: (31.452649817s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44200]
I1109 01:08:38.243449  109746 httplog.go:90] GET /api/v1/services?allowWatchBookmarks=true&resourceVersion=32259&timeout=8m15s&timeoutSeconds=495&watch=true: (31.451829986s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44202]
I1109 01:08:38.247420  109746 httplog.go:90] DELETE /api/v1/nodes: (4.798701ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.247606  109746 controller.go:180] Shutting down kubernetes service endpoint reconciler
I1109 01:08:38.249132  109746 httplog.go:90] GET /api/v1/namespaces/default/endpoints/kubernetes: (1.303407ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.252225  109746 httplog.go:90] PUT /api/v1/namespaces/default/endpoints/kubernetes: (2.640422ms) 200 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:44380]
I1109 01:08:38.252649  109746 cluster_authentication_trust_controller.go:463] Shutting down cluster_authentication_trust_controller controller
I1109 01:08:38.252798  109746 httplog.go:90] GET /api/v1/namespaces/kube-system/configmaps?allowWatchBookmarks=true&resourceVersion=32259&timeout=7m46s&timeoutSeconds=466&watch=true: (34.770580869s) 0 [scheduler.test/v0.0.0 (linux/amd64) kubernetes/$Format 127.0.0.1:43198]
--- FAIL: TestPreemption (34.96s)
    preemption_test.go:402: Test [basic pod preemption with filter]: Error running pause pod: Pod preemptiome700d66b-f1d9-492e-8f5e-99948dcb2209/victim-pod didn't schedule successfully. Error: timed out waiting for the condition

				from junit_99844db6e586a0ff1ded59c41b65ce7fe8e8a77e_20191109-010027.xml

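The log above shows the test polling GET .../pods/victim-pod roughly every 100ms until it gives up, and "timed out waiting for the condition" is the standard error string returned by wait.Poll in k8s.io/apimachinery when such a poll expires. The Go sketch below illustrates the kind of polling helper that produces this failure; the function name waitForPodScheduled, the package name, and the use of a context-taking Get (added to client-go after this job's vintage) are illustrative assumptions, not the test's actual code.

package preemptionexample // illustrative package name, not part of the test

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodScheduled polls the pod every 100ms (matching the GET cadence in
// the log) until its PodScheduled condition is True or the timeout expires.
// On timeout, wait.Poll returns wait.ErrWaitTimeout, whose message is
// "timed out waiting for the condition" -- the error surfaced in the failure above.
func waitForPodScheduled(cs kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}

With a single node that never fits victim-pod (the repeated "no fit: 0/1 nodes are available" lines), a poll of this shape necessarily expires, which matches the failure message recorded above.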


2898 passed tests · 4 skipped tests

Error lines from build-log.txt

... skipping 56 lines ...
Recording: record_command_canary
Running command: record_command_canary

+++ Running case: test-cmd.record_command_canary 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 155: bogus-expected-to-fail: command not found
!!! [1109 00:50:46] Call tree:
!!! [1109 00:50:46]  1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [1109 00:50:46]  2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [1109 00:50:46]  3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:131 juLog(...)
!!! [1109 00:50:46]  4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:159 record_command(...)
!!! [1109 00:50:46]  5: hack/make-rules/test-cmd.sh:27 source(...)
+++ exit code: 1
+++ error: 1
+++ [1109 00:50:46] Running kubeadm tests
+++ [1109 00:50:51] Building go targets for linux/amd64:
    cmd/kubeadm
Running tests for APIVersion: v1,admissionregistration.k8s.io/v1,admissionregistration.k8s.io/v1beta1,admission.k8s.io/v1,admission.k8s.io/v1beta1,apps/v1,apps/v1beta1,apps/v1beta2,auditregistration.k8s.io/v1alpha1,authentication.k8s.io/v1,authentication.k8s.io/v1beta1,authorization.k8s.io/v1,authorization.k8s.io/v1beta1,autoscaling/v1,autoscaling/v2beta1,autoscaling/v2beta2,batch/v1,batch/v1beta1,batch/v2alpha1,certificates.k8s.io/v1beta1,coordination.k8s.io/v1beta1,coordination.k8s.io/v1,discovery.k8s.io/v1alpha1,extensions/v1beta1,events.k8s.io/v1beta1,imagepolicy.k8s.io/v1alpha1,networking.k8s.io/v1,networking.k8s.io/v1beta1,node.k8s.io/v1alpha1,node.k8s.io/v1beta1,policy/v1beta1,rbac.authorization.k8s.io/v1,rbac.authorization.k8s.io/v1beta1,rbac.authorization.k8s.io/v1alpha1,scheduling.k8s.io/v1alpha1,scheduling.k8s.io/v1beta1,scheduling.k8s.io/v1,settings.k8s.io/v1alpha1,storage.k8s.io/v1beta1,storage.k8s.io/v1,storage.k8s.io/v1alpha1,flowcontrol.apiserver.k8s.io/v1alpha1,
+++ [1109 00:51:38] Running tests without code coverage
{"Time":"2019-11-09T00:52:55.312244371Z","Action":"output","Package":"k8s.io/kubernetes/cmd/kubeadm/test/cmd","Output":"ok  \tk8s.io/kubernetes/cmd/kubeadm/test/cmd\t37.577s\n"}
... skipping 282 lines ...
+++ [1109 00:54:38] Building kube-controller-manager
+++ [1109 00:54:42] Building go targets for linux/amd64:
    cmd/kube-controller-manager
+++ [1109 00:55:12] Starting controller-manager
Flag --port has been deprecated, see --secure-port instead.
I1109 00:55:13.110414   54666 serving.go:312] Generated self-signed cert in-memory
W1109 00:55:13.708821   54666 authentication.go:457] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W1109 00:55:13.708871   54666 authentication.go:319] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W1109 00:55:13.708877   54666 authentication.go:322] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W1109 00:55:13.708895   54666 authorization.go:177] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W1109 00:55:13.708909   54666 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I1109 00:55:13.708946   54666 controllermanager.go:161] Version: v1.18.0-alpha.0.557+dce4431bc37c51
I1109 00:55:13.710011   54666 secure_serving.go:174] Serving securely on [::]:10257
I1109 00:55:13.710144   54666 tlsconfig.go:220] Starting DynamicServingCertificateController
I1109 00:55:13.710340   54666 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
I1109 00:55:13.710417   54666 leaderelection.go:242] attempting to acquire leader lease  kube-system/kube-controller-manager...
... skipping 14 lines ...
I1109 00:55:13.934926   54666 controllermanager.go:534] Started "horizontalpodautoscaling"
I1109 00:55:13.934967   54666 horizontal.go:156] Starting HPA controller
I1109 00:55:13.934982   54666 shared_informer.go:197] Waiting for caches to sync for HPA
I1109 00:55:13.935357   54666 controllermanager.go:534] Started "disruption"
I1109 00:55:13.935388   54666 disruption.go:330] Starting disruption controller
I1109 00:55:13.935403   54666 shared_informer.go:197] Waiting for caches to sync for disruption
E1109 00:55:13.935858   54666 core.go:81] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W1109 00:55:13.935880   54666 controllermanager.go:526] Skipping "service"
W1109 00:55:13.935888   54666 controllermanager.go:513] "endpointslice" is disabled
I1109 00:55:13.936478   54666 controllermanager.go:534] Started "daemonset"
I1109 00:55:13.936611   54666 daemon_controller.go:255] Starting daemon sets controller
I1109 00:55:13.936641   54666 shared_informer.go:197] Waiting for caches to sync for daemon sets
I1109 00:55:13.936842   54666 controllermanager.go:534] Started "ttl"
... skipping 61 lines ...
I1109 00:55:14.570983   54666 controllermanager.go:534] Started "csrcleaner"
I1109 00:55:14.571002   54666 core.go:213] Will not configure cloud provider routes for allocate-node-cidrs: false, configure-cloud-routes: true.
W1109 00:55:14.571009   54666 controllermanager.go:526] Skipping "route"
W1109 00:55:14.571016   54666 controllermanager.go:513] "bootstrapsigner" is disabled
I1109 00:55:14.571030   54666 cleaner.go:81] Starting CSR cleaner controller
I1109 00:55:14.571397   54666 node_lifecycle_controller.go:77] Sending events to api server
E1109 00:55:14.571464   54666 core.go:203] failed to start cloud node lifecycle controller: no cloud provider provided
W1109 00:55:14.571481   54666 controllermanager.go:526] Skipping "cloud-node-lifecycle"
W1109 00:55:14.573267   54666 probe.go:268] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating.
I1109 00:55:14.574227   54666 controllermanager.go:534] Started "attachdetach"
I1109 00:55:14.574367   54666 attach_detach_controller.go:325] Starting attach detach controller
I1109 00:55:14.574392   54666 shared_informer.go:197] Waiting for caches to sync for attach detach
I1109 00:55:14.574750   54666 controllermanager.go:534] Started "clusterrole-aggregation"
... skipping 18 lines ...
I1109 00:55:14.578546   54666 expand_controller.go:308] Starting expand controller
I1109 00:55:14.578557   54666 shared_informer.go:197] Waiting for caches to sync for expand
I1109 00:55:14.578496   54666 shared_informer.go:197] Waiting for caches to sync for GC
I1109 00:55:14.578906   54666 controllermanager.go:534] Started "pvc-protection"
I1109 00:55:14.579768   54666 pvc_protection_controller.go:100] Starting PVC protection controller
I1109 00:55:14.579795   54666 shared_informer.go:197] Waiting for caches to sync for PVC protection
W1109 00:55:14.620257   54666 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
I1109 00:55:14.635173   54666 shared_informer.go:204] Caches are synced for HPA 
I1109 00:55:14.636823   54666 shared_informer.go:204] Caches are synced for daemon sets 
I1109 00:55:14.637131   54666 shared_informer.go:204] Caches are synced for TTL 
I1109 00:55:14.637575   54666 shared_informer.go:204] Caches are synced for ReplicationController 
I1109 00:55:14.644405   54666 shared_informer.go:204] Caches are synced for ReplicaSet 
The Service "kubernetes" is invalid: spec.clusterIP: Invalid value: "10.0.0.1": provided IP is already allocated
I1109 00:55:14.656278   54666 shared_informer.go:204] Caches are synced for namespace 
I1109 00:55:14.675022   54666 shared_informer.go:204] Caches are synced for ClusterRoleAggregator 
I1109 00:55:14.678672   54666 shared_informer.go:204] Caches are synced for certificate-csrapproving 
I1109 00:55:14.678762   54666 shared_informer.go:204] Caches are synced for GC 
E1109 00:55:14.691692   54666 clusterroleaggregation_controller.go:180] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
I1109 00:55:14.733968   54666 shared_informer.go:204] Caches are synced for job 
I1109 00:55:14.734480   54666 shared_informer.go:204] Caches are synced for deployment 
I1109 00:55:14.735584   54666 shared_informer.go:204] Caches are synced for disruption 
I1109 00:55:14.735671   54666 disruption.go:338] Sending events to api server.
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   37s
... skipping 94 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_RESTMapper_evaluation_tests
+++ [1109 00:55:18] Creating namespace namespace-1573260918-3791
namespace/namespace-1573260918-3791 created
Context "test" modified.
+++ [1109 00:55:18] Testing RESTMapper
+++ [1109 00:55:19] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
+++ exit code: 0
NAME                              SHORTNAMES   APIGROUP                       NAMESPACED   KIND
bindings                                                                      true         Binding
componentstatuses                 cs                                          false        ComponentStatus
configmaps                        cm                                          true         ConfigMap
endpoints                         ep                                          true         Endpoints
... skipping 600 lines ...
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
core.sh:186: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Berror: resource(s) were provided, but no name, label selector, or --all flag specified
core.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Bcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Berror: setting 'all' parameter but found a non empty selector. 
core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Bcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Bwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:206: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: 
(Bcore.sh:211: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: :
... skipping 12 lines ...
(Bpoddisruptionbudget.policy/test-pdb-2 created
core.sh:245: Successful get pdb/test-pdb-2 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 50%
(Bpoddisruptionbudget.policy/test-pdb-3 created
core.sh:251: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
(Bpoddisruptionbudget.policy/test-pdb-4 created
core.sh:255: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
(Berror: min-available and max-unavailable cannot be both specified
core.sh:261: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: 
(Bpod/env-test-pod created
matched TEST_CMD_1
matched <set to the key 'key-1' in secret 'test-secret'>
matched TEST_CMD_2
matched <set to the key 'key-2' of config map 'test-configmap'>
... skipping 188 lines ...
(Bpod/valid-pod patched
core.sh:470: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: changed-with-yaml:
(Bpod/valid-pod patched
core.sh:475: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.1:
(Bpod/valid-pod patched
core.sh:491: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
(B+++ [1109 00:55:59] "kubectl patch with resourceVersion 529" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
pod "valid-pod" deleted
pod/valid-pod replaced
core.sh:515: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
(BSuccessful
message:error: --grace-period must have --force specified
has:\-\-grace-period must have \-\-force specified
Successful
message:error: --timeout must have --force specified
has:\-\-timeout must have \-\-force specified
node/node-v1-test created
W1109 00:56:00.422648   54666 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
node/node-v1-test replaced
core.sh:552: Successful get node node-v1-test {{.metadata.annotations.a}}: b
(Bnode "node-v1-test" deleted
core.sh:559: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
(Bcore.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/serve_hostname:
(BEdit cancelled, no changes made.
... skipping 22 lines ...
spec:
  containers:
  - image: k8s.gcr.io/pause:2.0
    name: kubernetes-pause
has:localonlyvalue
core.sh:585: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
(Berror: 'name' already has a value (valid-pod), and --overwrite is false
core.sh:589: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
(Bcore.sh:593: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
(Bpod/valid-pod labeled
core.sh:597: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
(Bcore.sh:601: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(Bwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
... skipping 85 lines ...
+++ Running case: test-cmd.run_kubectl_create_error_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_create_error_tests
+++ [1109 00:56:10] Creating namespace namespace-1573260970-31028
namespace/namespace-1573260970-31028 created
Context "test" modified.
+++ [1109 00:56:11] Testing kubectl create with error
Error: must specify one of -f and -k

Create a resource from a file or from stdin.

 JSON and YAML formats are accepted.

Examples:
... skipping 41 lines ...

Usage:
  kubectl create -f FILENAME [options]

Use "kubectl <command> --help" for more information about a given command.
Use "kubectl options" for a list of global command-line options (applies to all commands).
+++ [1109 00:56:11] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false
kubectl convert is DEPRECATED and will be removed in a future version.
In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
+++ exit code: 0
Recording: run_kubectl_apply_tests
Running command: run_kubectl_apply_tests

... skipping 17 lines ...
(Bpod "test-pod" deleted
customresourcedefinition.apiextensions.k8s.io/resources.mygroup.example.com created
I1109 00:56:14.368593   51227 client.go:361] parsed scheme: "endpoint"
I1109 00:56:14.368650   51227 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 0  <nil>}]
I1109 00:56:14.373595   51227 controller.go:606] quota admission added evaluator for: resources.mygroup.example.com
kind.mygroup.example.com/myobj serverside-applied (server dry run)
Error from server (NotFound): resources.mygroup.example.com "myobj" not found
customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
+++ exit code: 0
Recording: run_kubectl_run_tests
Running command: run_kubectl_run_tests

+++ Running case: test-cmd.run_kubectl_run_tests 
... skipping 104 lines ...
Context "test" modified.
+++ [1109 00:56:18] Testing kubectl create filter
create.sh:30: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(Bpod/selector-test-pod created
create.sh:34: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
(BSuccessful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_apply_deployments_tests
Running command: run_kubectl_apply_deployments_tests

... skipping 30 lines ...
I1109 00:56:21.295740   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-8484dd655", UID:"aa6b6c77-d945-4782-be5a-8a914acfcd80", APIVersion:"apps/v1", ResourceVersion:"624", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-8484dd655-dnx4r
I1109 00:56:21.299925   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-8484dd655", UID:"aa6b6c77-d945-4782-be5a-8a914acfcd80", APIVersion:"apps/v1", ResourceVersion:"624", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-8484dd655-d6rrl
I1109 00:56:21.300279   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-8484dd655", UID:"aa6b6c77-d945-4782-be5a-8a914acfcd80", APIVersion:"apps/v1", ResourceVersion:"624", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-8484dd655-mwd92
apps.sh:148: Successful get deployment nginx {{.metadata.name}}: nginx
(BI1109 00:56:25.341281   54666 horizontal.go:341] Horizontal Pod Autoscaler frontend has been deleted in namespace-1573260968-18140
Successful
message:Error from server (Conflict): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1573260979-9349\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
to:
Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
Name: "nginx", Namespace: "namespace-1573260979-9349"
Object: &{map["apiVersion":"apps/v1" "kind":"Deployment" "metadata":map["annotations":map["deployment.kubernetes.io/revision":"1" "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1573260979-9349\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx1\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx1\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"] "creationTimestamp":"2019-11-09T00:56:21Z" "generation":'\x01' "labels":map["name":"nginx"] "name":"nginx" "namespace":"namespace-1573260979-9349" "resourceVersion":"636" "selfLink":"/apis/apps/v1/namespaces/namespace-1573260979-9349/deployments/nginx" "uid":"e2651cfa-2670-45f1-b2d5-07e060dda37b"] "spec":map["progressDeadlineSeconds":'\u0258' "replicas":'\x03' "revisionHistoryLimit":'\n' "selector":map["matchLabels":map["name":"nginx1"]] "strategy":map["rollingUpdate":map["maxSurge":"25%" "maxUnavailable":"25%"] "type":"RollingUpdate"] "template":map["metadata":map["creationTimestamp":<nil> "labels":map["name":"nginx1"]] "spec":map["containers":[map["image":"k8s.gcr.io/nginx:test-cmd" "imagePullPolicy":"IfNotPresent" "name":"nginx" "ports":[map["containerPort":'P' "protocol":"TCP"]] "resources":map[] "terminationMessagePath":"/dev/termination-log" "terminationMessagePolicy":"File"]] "dnsPolicy":"ClusterFirst" "restartPolicy":"Always" "schedulerName":"default-scheduler" "securityContext":map[] "terminationGracePeriodSeconds":'\x1e']]] "status":map["conditions":[map["lastTransitionTime":"2019-11-09T00:56:21Z" "lastUpdateTime":"2019-11-09T00:56:21Z" "message":"Deployment does not have minimum availability." "reason":"MinimumReplicasUnavailable" "status":"False" "type":"Available"] map["lastTransitionTime":"2019-11-09T00:56:21Z" "lastUpdateTime":"2019-11-09T00:56:21Z" "message":"ReplicaSet \"nginx-8484dd655\" is progressing." "reason":"ReplicaSetUpdated" "status":"True" "type":"Progressing"]] "observedGeneration":'\x01' "replicas":'\x03' "unavailableReplicas":'\x03' "updatedReplicas":'\x03']]}
for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
has:Error from server (Conflict)
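The Conflict above (like the earlier "kubectl patch with resourceVersion 529" case) comes from the apiserver's optimistic concurrency check: the request carried a stale metadata.resourceVersion, so it was rejected with "the object has been modified; please apply your changes to the latest version and try again". A minimal sketch of the usual client-side handling, using client-go's retry helper; the closure body here is a hypothetical placeholder:

package main

import (
	"fmt"

	"k8s.io/client-go/util/retry"
)

func main() {
	// RetryOnConflict re-runs the closure whenever it returns a Conflict
	// (409) error, i.e. whenever the update raced a concurrent write and
	// the resourceVersion it sent had gone stale.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Hypothetical placeholder: re-GET the object, reapply the change,
		// and return the Update error so Conflicts trigger another attempt.
		return nil
	})
	fmt.Println(err)
}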
deployment.apps/nginx configured
I1109 00:56:30.874105   54666 event.go:281] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1573260979-9349", Name:"nginx", UID:"ed499db3-fd7d-4496-ac8b-8c64777563ee", APIVersion:"apps/v1", ResourceVersion:"665", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-668b6c7744 to 3
I1109 00:56:30.877484   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-668b6c7744", UID:"c8e3a46b-3598-4dfa-86fb-1816366f844c", APIVersion:"apps/v1", ResourceVersion:"666", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-668b6c7744-dg7ps
I1109 00:56:30.881939   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-668b6c7744", UID:"c8e3a46b-3598-4dfa-86fb-1816366f844c", APIVersion:"apps/v1", ResourceVersion:"666", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-668b6c7744-2frwl
I1109 00:56:30.882208   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573260979-9349", Name:"nginx-668b6c7744", UID:"c8e3a46b-3598-4dfa-86fb-1816366f844c", APIVersion:"apps/v1", ResourceVersion:"666", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-668b6c7744-t2z2j
Successful
... skipping 141 lines ...
+++ [1109 00:56:38] Creating namespace namespace-1573260998-26292
namespace/namespace-1573260998-26292 created
Context "test" modified.
+++ [1109 00:56:38] Testing kubectl get
get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:{
    "apiVersion": "v1",
    "items": [],
... skipping 23 lines ...
has not:No resources found
Successful
message:NAME
has not:No resources found
get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:error: the server doesn't have a resource type "foobar"
has not:No resources found
Successful
message:No resources found in namespace-1573260998-26292 namespace.
has:No resources found
Successful
message:
has not:No resources found
Successful
message:No resources found in namespace-1573260998-26292 namespace.
has:No resources found
get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:Error from server (NotFound): pods "abc" not found
has not:List
Successful
message:I1109 00:56:40.425796   65110 loader.go:375] Config loaded from file:  /tmp/tmp.iLYxMFY5IZ/.kube/config
I1109 00:56:40.428260   65110 round_trippers.go:443] GET http://127.0.0.1:8080/version?timeout=32s 200 OK in 1 milliseconds
I1109 00:56:40.458966   65110 round_trippers.go:443] GET http://127.0.0.1:8080/api/v1/namespaces/default/pods 200 OK in 2 milliseconds
I1109 00:56:40.460821   65110 round_trippers.go:443] GET http://127.0.0.1:8080/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds
... skipping 647 lines ...
Successful
message:NAME    DATA   AGE
one     0      1s
three   0      0s
two     0      1s
STATUS    REASON          MESSAGE
Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
has not:watch is only supported on individual resources
Successful
message:STATUS    REASON          MESSAGE
Failure   InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
has not:watch is only supported on individual resources
+++ [1109 00:56:47] Creating namespace namespace-1573261007-16072
namespace/namespace-1573261007-16072 created
Context "test" modified.
get.sh:153: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(Bpod/valid-pod created
... skipping 56 lines ...
}
get.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
(B<no value>Successful
message:valid-pod:
has:valid-pod:
Successful
message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
	template was:
		{.missing}
	object given to jsonpath engine was:
		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2019-11-09T00:56:47Z", "labels":map[string]interface {}{"name":"valid-pod"}, "name":"valid-pod", "namespace":"namespace-1573261007-16072", "resourceVersion":"749", "selfLink":"/api/v1/namespaces/namespace-1573261007-16072/pods/valid-pod", "uid":"bb6df0ce-ddae-4c20-84a9-23b862408536"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
has:missing is not found
error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
Successful
message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template:
	template was:
		{{.missing}}
	raw data was:
		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2019-11-09T00:56:47Z","labels":{"name":"valid-pod"},"name":"valid-pod","namespace":"namespace-1573261007-16072","resourceVersion":"749","selfLink":"/api/v1/namespaces/namespace-1573261007-16072/pods/valid-pod","uid":"bb6df0ce-ddae-4c20-84a9-23b862408536"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
	object given to template engine was:
		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2019-11-09T00:56:47Z labels:map[name:valid-pod] name:valid-pod namespace:namespace-1573261007-16072 resourceVersion:749 selfLink:/api/v1/namespaces/namespace-1573261007-16072/pods/valid-pod uid:bb6df0ce-ddae-4c20-84a9-23b862408536] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
has:map has no entry for key "missing"
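The two blocks above contrast kubectl's jsonpath and go-template error output for a nonexistent key. The go-template wording "map has no entry for key" is what Go's text/template produces when the missingkey=error option is set, which the error text above suggests the template printer enables; a small self-contained illustration using only the standard library:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// With missingkey=error, referencing a key absent from the data map
	// aborts execution instead of printing "<no value>".
	tmpl := template.Must(
		template.New("output").Option("missingkey=error").Parse("{{.missing}}"))

	data := map[string]interface{}{"kind": "Pod"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		// Prints: template: output:1:2: executing "output" at <.missing>:
		// map has no entry for key "missing"
		fmt.Println(err)
	}
}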
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
STATUS      REASON          MESSAGE
Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
has:STATUS
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
STATUS      REASON          MESSAGE
Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
has:valid-pod
Successful
message:pod/valid-pod
status/<unknown>
has not:STATUS
Successful
... skipping 45 lines ...
      (Client.Timeout exceeded while reading body)'
    reason: UnexpectedServerResponse
  - message: 'unable to decode an event from the watch stream: net/http: request canceled
      (Client.Timeout exceeded while reading body)'
    reason: ClientWatchDecoding
kind: Status
message: 'an error on the server ("unable to decode an event from the watch stream:
  net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented
  the request from succeeding'
metadata: {}
reason: InternalError
status: Failure
has not:STATUS
... skipping 42 lines ...
      (Client.Timeout exceeded while reading body)'
    reason: UnexpectedServerResponse
  - message: 'unable to decode an event from the watch stream: net/http: request canceled
      (Client.Timeout exceeded while reading body)'
    reason: ClientWatchDecoding
kind: Status
message: 'an error on the server ("unable to decode an event from the watch stream:
  net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented
  the request from succeeding'
metadata: {}
reason: InternalError
status: Failure
has:name: valid-pod
Successful
message:Error from server (NotFound): pods "invalid-pod" not found
has:"invalid-pod" not found
pod "valid-pod" deleted
get.sh:196: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(Bpod/redis-master created
pod/valid-pod created
Successful
... skipping 35 lines ...
+++ command: run_kubectl_exec_pod_tests
+++ [1109 00:56:53] Creating namespace namespace-1573261013-15033
namespace/namespace-1573261013-15033 created
Context "test" modified.
+++ [1109 00:56:53] Testing kubectl exec POD COMMAND
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
pod/test-pod created
Successful
message:Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pods "test-pod" not found
Successful
message:Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod or type/name must be specified
pod "test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_exec_resource_name_tests
Running command: run_kubectl_exec_resource_name_tests

... skipping 2 lines ...
+++ command: run_kubectl_exec_resource_name_tests
+++ [1109 00:56:54] Creating namespace namespace-1573261014-690
namespace/namespace-1573261014-690 created
Context "test" modified.
+++ [1109 00:56:54] Testing kubectl exec TYPE/NAME COMMAND
Successful
message:error: the server doesn't have a resource type "foo"
has:error:
Successful
message:Error from server (NotFound): deployments.apps "bar" not found
has:"bar" not found
pod/test-pod created
replicaset.apps/frontend created
I1109 00:56:54.849811   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261014-690", Name:"frontend", UID:"ea40cc37-58c3-4266-ac5a-8afd1f8fc606", APIVersion:"apps/v1", ResourceVersion:"807", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-kznss
I1109 00:56:54.852595   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261014-690", Name:"frontend", UID:"ea40cc37-58c3-4266-ac5a-8afd1f8fc606", APIVersion:"apps/v1", ResourceVersion:"807", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-jbrc5
I1109 00:56:54.854008   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261014-690", Name:"frontend", UID:"ea40cc37-58c3-4266-ac5a-8afd1f8fc606", APIVersion:"apps/v1", ResourceVersion:"807", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: frontend-z22tx
configmap/test-set-env-config created
Successful
message:error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
has:not implemented
Successful
message:Error from server (BadRequest): pod test-pod does not have a host assigned
has not:not found
Successful
message:Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod or type/name must be specified
Successful
message:Error from server (BadRequest): pod frontend-jbrc5 does not have a host assigned
has not:not found
Successful
message:Error from server (BadRequest): pod frontend-jbrc5 does not have a host assigned
has not:pod or type/name must be specified
pod "test-pod" deleted
replicaset.apps "frontend" deleted
configmap "test-set-env-config" deleted
+++ exit code: 0
Recording: run_create_secret_tests
Running command: run_create_secret_tests

+++ Running case: test-cmd.run_create_secret_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_create_secret_tests
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
message:user-specified
has:user-specified
Successful
{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"be357306-462e-4434-80de-e31c4ef1407a","resourceVersion":"827","creationTimestamp":"2019-11-09T00:56:56Z"}}
... skipping 2 lines ...
has:uid
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","selfLink":"/api/v1/namespaces/default/configmaps/tester-update-cm","uid":"be357306-462e-4434-80de-e31c4ef1407a","resourceVersion":"828","creationTimestamp":"2019-11-09T00:56:56Z"},"data":{"key1":"config1"}}
has:config1
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"be357306-462e-4434-80de-e31c4ef1407a"}}
Successful
message:Error from server (NotFound): configmaps "tester-update-cm" not found
has:configmaps "tester-update-cm" not found
+++ exit code: 0
Recording: run_kubectl_create_kustomization_directory_tests
Running command: run_kubectl_create_kustomization_directory_tests

+++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests 
... skipping 110 lines ...
valid-pod   0/1     Pending   0          0s
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
STATUS      REASON          MESSAGE
Failure     InternalError   an error on the server ("unable to decode an event from the watch stream: net/http: request canceled (Client.Timeout exceeded while reading body)") has prevented the request from succeeding
has:Timeout exceeded while reading body
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          2s
has:valid-pod
Successful
message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
has:Invalid timeout value
pod "valid-pod" deleted
+++ exit code: 0
Recording: run_crd_tests
Running command: run_crd_tests

... skipping 158 lines ...
foo.company.com/test patched
crd.sh:236: Successful get foos/test {{.patched}}: value1
(Bfoo.company.com/test patched
crd.sh:238: Successful get foos/test {{.patched}}: value2
(Bfoo.company.com/test patched
crd.sh:240: Successful get foos/test {{.patched}}: <no value>
(B+++ [1109 00:57:08] "kubectl patch --local" returns error as expected for CustomResource: error: cannot apply strategic merge patch for company.com/v1, Kind=Foo locally, try --type merge
{
    "apiVersion": "company.com/v1",
    "kind": "Foo",
    "metadata": {
        "annotations": {
            "kubernetes.io/change-cause": "kubectl patch foos/test --server=http://127.0.0.1:8080 --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 189 lines ...
(Bcrd.sh:450: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}: 
(Bnamespace/non-native-resources created
bar.company.com/test created
crd.sh:455: Successful get bars {{len .items}}: 1
(Bnamespace "non-native-resources" deleted
crd.sh:458: Successful get bars {{len .items}}: 0
(BError from server (NotFound): namespaces "non-native-resources" not found
customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted
+++ exit code: 0
Recording: run_cmd_with_img_tests
... skipping 11 lines ...
I1109 00:57:30.314435   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-32133", Name:"test1-6cdffdb5b8", UID:"72dc3e14-8e6b-422c-851b-40167137d4e9", APIVersion:"apps/v1", ResourceVersion:"998", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test1-6cdffdb5b8-znfrq
Successful
message:deployment.apps/test1 created
has:deployment.apps/test1 created
deployment.apps "test1" deleted
Successful
message:error: Invalid image name "InvalidImageName": invalid reference format
has:error: Invalid image name "InvalidImageName": invalid reference format
+++ exit code: 0
+++ [1109 00:57:30] Testing recursive resources
+++ [1109 00:57:30] Creating namespace namespace-1573261050-31860
W1109 00:57:30.597763   51227 cacher.go:162] Terminating all watchers from cacher *unstructured.Unstructured
E1109 00:57:30.599332   54666 reflector.go:320] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
namespace/namespace-1573261050-31860 created
W1109 00:57:30.697879   51227 cacher.go:162] Terminating all watchers from cacher *unstructured.Unstructured
E1109 00:57:30.700667   54666 reflector.go:320] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
Context "test" modified.
W1109 00:57:30.808967   51227 cacher.go:162] Terminating all watchers from cacher *unstructured.Unstructured
E1109 00:57:30.810588   54666 reflector.go:320] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BW1109 00:57:30.946100   51227 cacher.go:162] Terminating all watchers from cacher *unstructured.Unstructured
E1109 00:57:30.947610   54666 reflector.go:320] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BSuccessful
message:pod/busybox0 created
pod/busybox1 created
error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
(BSuccessful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BE1109 00:57:31.600805   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:31.702127   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:31.812000   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
(BSuccessful
message:pod/busybox0 replaced
pod/busybox1 replaced
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
E1109 00:57:31.948978   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BSuccessful
message:Name:         busybox0
Namespace:    namespace-1573261050-31860
Priority:     0
Node:         <none>
... skipping 159 lines ...
has:Object 'Kind' is missing
generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
(BSuccessful
message:pod/busybox0 annotated
pod/busybox1 annotated
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BE1109 00:57:32.602208   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:32.703231   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
(BSuccessful
message:Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
pod/busybox0 configured
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
pod/busybox1 configured
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
E1109 00:57:32.813523   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:265: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
(BE1109 00:57:32.950337   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps/nginx created
I1109 00:57:33.072402   54666 event.go:281] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1573261050-31860", Name:"nginx", UID:"afd5457c-e742-4c7a-84ef-76cc8d3dd075", APIVersion:"apps/v1", ResourceVersion:"1025", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx-f87d999f7 to 3
I1109 00:57:33.076353   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx-f87d999f7", UID:"b310f2f1-8e24-4a33-aacd-89ae4184912c", APIVersion:"apps/v1", ResourceVersion:"1026", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-f87d999f7-v8zlj
I1109 00:57:33.078668   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx-f87d999f7", UID:"b310f2f1-8e24-4a33-aacd-89ae4184912c", APIVersion:"apps/v1", ResourceVersion:"1026", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-f87d999f7-pkgsn
I1109 00:57:33.080479   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx-f87d999f7", UID:"b310f2f1-8e24-4a33-aacd-89ae4184912c", APIVersion:"apps/v1", ResourceVersion:"1026", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx-f87d999f7-xp6kp
generic-resources.sh:269: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx:
... skipping 42 lines ...
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
has:extensions/v1beta1
deployment.apps "nginx" deleted
E1109 00:57:33.603777   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:281: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BE1109 00:57:33.704683   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:33.815130   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:285: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BSuccessful
message:kubectl convert is DEPRECATED and will be removed in a future version.
In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version.
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:290: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BE1109 00:57:33.951706   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:299: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bpod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
I1109 00:57:34.280582   54666 namespace_controller.go:185] Namespace has been deleted non-native-resources
generic-resources.sh:304: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
(BSuccessful
message:pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:309: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bpod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
E1109 00:57:34.605320   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:314: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
(BSuccessful
message:pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
E1109 00:57:34.706150   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:319: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(BE1109 00:57:34.816419   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:34.953346   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:323: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
(BSuccessful
message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "busybox0" force deleted
pod "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:328: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
(Breplicationcontroller/busybox0 created
I1109 00:57:35.262829   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox0", UID:"438bc6b4-aa0d-431f-a229-429c6f1f5290", APIVersion:"v1", ResourceVersion:"1057", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-dtvqc
replicationcontroller/busybox1 created
error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I1109 00:57:35.268936   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox1", UID:"7d541331-7ace-4456-a369-695d48527221", APIVersion:"v1", ResourceVersion:"1059", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-hpsxq
generic-resources.sh:332: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bgeneric-resources.sh:337: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bgeneric-resources.sh:338: Successful get rc busybox0 {{.spec.replicas}}: 1
(BE1109 00:57:35.606823   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:339: Successful get rc busybox1 {{.spec.replicas}}: 1
(BE1109 00:57:35.707646   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:35.818365   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:344: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
(BE1109 00:57:35.954885   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:345: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80
(BSuccessful
message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
horizontalpodautoscaler.autoscaling/busybox1 autoscaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
horizontalpodautoscaler.autoscaling "busybox0" deleted
horizontalpodautoscaler.autoscaling "busybox1" deleted
generic-resources.sh:353: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
(Bgeneric-resources.sh:354: Successful get rc busybox0 {{.spec.replicas}}: 1
(Bgeneric-resources.sh:355: Successful get rc busybox1 {{.spec.replicas}}: 1
(BE1109 00:57:36.608365   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:359: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
(BE1109 00:57:36.709133   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:360: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
(BSuccessful
message:service/busybox0 exposed
service/busybox1 exposed
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
E1109 00:57:36.819509   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:366: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
E1109 00:57:36.956071   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:367: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:368: Successful get rc busybox1 {{.spec.replicas}}: 1
I1109 00:57:37.160227   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox0", UID:"438bc6b4-aa0d-431f-a229-429c6f1f5290", APIVersion:"v1", ResourceVersion:"1079", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-26v24
I1109 00:57:37.171750   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox1", UID:"7d541331-7ace-4456-a369-695d48527221", APIVersion:"v1", ResourceVersion:"1084", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-b7gsm
generic-resources.sh:372: Successful get rc busybox0 {{.spec.replicas}}: 2
generic-resources.sh:373: Successful get rc busybox1 {{.spec.replicas}}: 2
Successful
message:replicationcontroller/busybox0 scaled
replicationcontroller/busybox1 scaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:378: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
E1109 00:57:37.609733   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:382: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
Successful
message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
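The force deletion above (the "Immediate deletion does not wait for confirmation..." warning) corresponds to a grace-period-zero delete over the same recursive directory, roughly:
kubectl delete -f hack/testdata/recursive/rc --recursive --force --grace-period=0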
E1109 00:57:37.710672   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:387: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
E1109 00:57:37.820885   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps/nginx1-deployment created
deployment.apps/nginx0-deployment created
I1109 00:57:37.935927   54666 event.go:281] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1573261050-31860", Name:"nginx1-deployment", UID:"4456624a-e895-4247-b169-6ffc62502004", APIVersion:"apps/v1", ResourceVersion:"1100", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx1-deployment-7bdbbfb5cf to 2
error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I1109 00:57:37.938877   54666 event.go:281] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"namespace-1573261050-31860", Name:"nginx0-deployment", UID:"3ebf3d07-d43a-4b65-a813-3ece0fe1d34a", APIVersion:"apps/v1", ResourceVersion:"1102", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set nginx0-deployment-57c6bff7f6 to 2
I1109 00:57:37.941041   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx1-deployment-7bdbbfb5cf", UID:"8b158b72-b355-4aff-a64d-5dba71a34801", APIVersion:"apps/v1", ResourceVersion:"1101", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-7bdbbfb5cf-mprg9
I1109 00:57:37.943229   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx0-deployment-57c6bff7f6", UID:"9c464a41-2c8f-4e54-986e-ae1478b790f6", APIVersion:"apps/v1", ResourceVersion:"1103", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57c6bff7f6-2c87c
I1109 00:57:37.945388   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx1-deployment-7bdbbfb5cf", UID:"8b158b72-b355-4aff-a64d-5dba71a34801", APIVersion:"apps/v1", ResourceVersion:"1101", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx1-deployment-7bdbbfb5cf-hfkhm
I1109 00:57:37.949423   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"namespace-1573261050-31860", Name:"nginx0-deployment-57c6bff7f6", UID:"9c464a41-2c8f-4e54-986e-ae1478b790f6", APIVersion:"apps/v1", ResourceVersion:"1103", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: nginx0-deployment-57c6bff7f6-6kdbx
E1109 00:57:37.958124   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:391: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
generic-resources.sh:392: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
generic-resources.sh:396: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9:
Successful
message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
deployment.apps/nginx1-deployment paused
deployment.apps/nginx0-deployment paused
E1109 00:57:38.611258   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:404: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
E1109 00:57:38.712130   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps/nginx1-deployment resumed
deployment.apps/nginx0-deployment resumed
E1109 00:57:38.822298   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:410: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: <no value>:<no value>:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
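The pause/resume checks above run kubectl rollout against the recursive deployment directory. Note that after resume the assertion at generic-resources.sh:410 sees <no value> rather than false: .spec.paused is an omitempty bool, so resuming drops the field from the serialized object. A rough equivalent (the --recursive flag on rollout is assumed to behave as it does for other file-based commands):
kubectl rollout pause -f hack/testdata/recursive/deployment --recursive
kubectl get deployments -o go-template='{{range .items}}{{.spec.paused}}:{{end}}'   # true:true:
kubectl rollout resume -f hack/testdata/recursive/deployment --recursive
kubectl get deployments -o go-template='{{range .items}}{{.spec.paused}}:{{end}}'   # <no value>:<no value>: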
E1109 00:57:38.959697   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:deployment.apps/nginx1-deployment 
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx0-deployment
Successful
message:deployment.apps/nginx1-deployment 
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx1-deployment
Successful
message:deployment.apps/nginx1-deployment 
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
deployment.apps "nginx1-deployment" force deleted
deployment.apps "nginx0-deployment" force deleted
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
E1109 00:57:39.612824   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:39.713554   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:39.823753   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:39.961133   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:426: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
replicationcontroller/busybox0 created
I1109 00:57:40.407474   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox0", UID:"04207573-7ef7-4fde-8d32-868fb8f67487", APIVersion:"v1", ResourceVersion:"1150", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox0-nxw4h
replicationcontroller/busybox1 created
error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I1109 00:57:40.414581   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicationController", Namespace:"namespace-1573261050-31860", Name:"busybox1", UID:"d59fbaa1-4f3b-48a6-8ab2-edc234f90c09", APIVersion:"v1", ResourceVersion:"1152", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: busybox1-tp6bf
generic-resources.sh:430: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
E1109 00:57:40.614295   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:no rollbacker has been implemented for "ReplicationController"
no rollbacker has been implemented for "ReplicationController"
unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:no rollbacker has been implemented for "ReplicationController"
Successful
message:no rollbacker has been implemented for "ReplicationController"
no rollbacker has been implemented for "ReplicationController"
unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
E1109 00:57:40.714918   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox0" pausing is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox1" pausing is not supported
E1109 00:57:40.824862   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox0" resuming is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox1" resuming is not supported
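Only kinds with a registered pauser/resumer (Deployments here) support rollout pause and resume; ReplicationControllers do not, which is exactly what the checks above assert. For example, using the names from the log:
kubectl rollout pause rc/busybox0    # error: replicationcontrollers "busybox0" pausing is not supported
kubectl rollout resume rc/busybox1   # error: replicationcontrollers "busybox1" resuming is not supported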
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
E1109 00:57:40.962396   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:41.615697   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:41.716500   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:41.826359   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Recording: run_namespace_tests
Running command: run_namespace_tests
E1109 00:57:41.963831   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource

+++ Running case: test-cmd.run_namespace_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_namespace_tests
+++ [1109 00:57:41] Testing kubectl(v1:namespaces)
namespace/my-namespace created
core.sh:1308: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
(Bnamespace "my-namespace" deleted
E1109 00:57:42.617145   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:42.717990   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:42.827950   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:42.965308   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:43.618581   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:43.719325   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:43.829476   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:43.966599   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:44.620032   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:44.720463   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:44.831101   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:44.968327   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:45.621614   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:45.721814   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:45.832616   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:45.969774   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:46.623051   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:46.723259   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:46.834407   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:46.971224   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:47.052654   54666 shared_informer.go:197] Waiting for caches to sync for resource quota
I1109 00:57:47.052715   54666 shared_informer.go:204] Caches are synced for resource quota 
namespace/my-namespace condition met
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
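The "condition met" / "not found" pair above is the usual delete-then-wait pattern for namespaces; a minimal sketch (the timeout value is an assumption):
kubectl delete namespace my-namespace
kubectl wait --for=delete namespace/my-namespace --timeout=60s   # namespace/my-namespace condition met
kubectl get namespace my-namespace                               # Error from server (NotFound): namespaces "my-namespace" not found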
I1109 00:57:47.472295   54666 shared_informer.go:197] Waiting for caches to sync for garbage collector
I1109 00:57:47.472365   54666 shared_informer.go:204] Caches are synced for garbage collector 
namespace/my-namespace created
E1109 00:57:47.624298   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1317: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
E1109 00:57:47.724969   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:47.835591   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
namespace "my-namespace" deleted
namespace "namespace-1573260916-14046" deleted
namespace "namespace-1573260918-3791" deleted
... skipping 26 lines ...
namespace "namespace-1573261017-29833" deleted
namespace "namespace-1573261019-890" deleted
namespace "namespace-1573261021-16618" deleted
namespace "namespace-1573261023-28848" deleted
namespace "namespace-1573261050-31860" deleted
namespace "namespace-1573261050-32133" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:warning: deleting cluster-scoped resources
Successful
message:warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
namespace "my-namespace" deleted
namespace "namespace-1573260916-14046" deleted
... skipping 27 lines ...
namespace "namespace-1573261017-29833" deleted
namespace "namespace-1573261019-890" deleted
namespace "namespace-1573261021-16618" deleted
namespace "namespace-1573261023-28848" deleted
namespace "namespace-1573261050-31860" deleted
namespace "namespace-1573261050-32133" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:namespace "my-namespace" deleted
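Bulk namespace deletion produces the cluster-scoped warning and is rejected for the protected system namespaces, as captured above; something along the lines of:
kubectl delete namespaces --all
# warning: deleting cluster-scoped resources, not scoped to the provided namespace
# "default", "kube-public" and "kube-system" come back as Forbidden from the API server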
E1109 00:57:47.975565   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1329: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: :
namespace/other created
core.sh:1333: Successful get namespaces/other {{.metadata.name}}: other
core.sh:1337: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
pod/valid-pod created
core.sh:1341: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
E1109 00:57:48.625844   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:48.726327   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1343: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Successful
message:error: a resource cannot be retrieved by name across all namespaces
has:a resource cannot be retrieved by name across all namespaces
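kubectl refuses to fetch a single named resource across all namespaces; a name has to be paired with a concrete namespace. Using the pod from this test:
kubectl get pod valid-pod --all-namespaces    # error: a resource cannot be retrieved by name across all namespaces
kubectl get pod valid-pod --namespace=other   # works: the pod lives in the "other" namespace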
E1109 00:57:48.837096   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1350: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
E1109 00:57:48.976932   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:1354: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: 
(Bnamespace "other" deleted
E1109 00:57:49.627121   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:49.727588   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:49.838500   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:49.978671   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:50.628698   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:50.728959   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:50.770007   54666 horizontal.go:341] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1573261050-31860
I1109 00:57:50.775956   54666 horizontal.go:341] Horizontal Pod Autoscaler busybox1 has been deleted in namespace-1573261050-31860
E1109 00:57:50.840082   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:50.980384   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:51.630135   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:51.730478   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:51.841452   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:51.982023   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:52.631391   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:52.731971   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:52.842625   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:52.984097   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:53.632578   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:53.733509   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:53.844144   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:53.985615   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_secrets_test
Running command: run_secrets_test

+++ Running case: test-cmd.run_secrets_test 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_secrets_test
+++ [1109 00:57:54] Creating namespace namespace-1573261074-15164
namespace/namespace-1573261074-15164 created
Context "test" modified.
+++ [1109 00:57:54] Testing secrets
E1109 00:57:54.634258   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:54.699265   71529 loader.go:375] Config loaded from file:  /tmp/tmp.iLYxMFY5IZ/.kube/config
Successful
message:apiVersion: v1
data:
  key1: dmFsdWUx
kind: Secret
... skipping 25 lines ...
  key1: dmFsdWUx
kind: Secret
metadata:
  creationTimestamp: null
  name: test
has not:example.com
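The Secret manifests printed above carry base64-encoded values (dmFsdWUx decodes to value1), and the "has not:example.com" check simply asserts that string is absent from the rendered output. A rough reconstruction of the dry-run rendering (this vintage of kubectl uses the bare --dry-run flag; newer releases spell it --dry-run=client):
kubectl create secret generic test --from-literal=key1=value1 --dry-run -o yaml
echo dmFsdWUx | base64 --decode   # value1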
E1109 00:57:54.734973   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:725: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-secrets\" }}found{{end}}{{end}}:: :
E1109 00:57:54.845602   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace/test-secrets created
E1109 00:57:54.987933   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:729: Successful get namespaces/test-secrets {{.metadata.name}}: test-secrets
core.sh:733: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
(Bsecret/test-secret created
core.sh:737: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
core.sh:738: Successful get secret/test-secret --namespace=test-secrets {{.type}}: test-type
(Bsecret "test-secret" deleted
E1109 00:57:55.635774   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:748: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
E1109 00:57:55.736607   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
secret/test-secret created
E1109 00:57:55.847292   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:752: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
core.sh:753: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/dockerconfigjson
E1109 00:57:55.989618   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
secret "test-secret" deleted
core.sh:763: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
secret/test-secret created
core.sh:766: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
core.sh:767: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
secret "test-secret" deleted
E1109 00:57:56.637240   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
secret/test-secret created
E1109 00:57:56.738091   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:773: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
E1109 00:57:56.848786   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:774: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/tls
E1109 00:57:56.991438   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
secret "test-secret" deleted
secret/secret-string-data created
core.sh:796: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
core.sh:797: Successful get secret/secret-string-data --namespace=test-secrets  {{.data}}: map[k1:djE= k2:djI=]
I1109 00:57:57.426930   54666 namespace_controller.go:185] Namespace has been deleted my-namespace
core.sh:798: Successful get secret/secret-string-data --namespace=test-secrets  {{.stringData}}: <no value>
(Bsecret "secret-string-data" deleted
E1109 00:57:57.638777   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:807: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: 
E1109 00:57:57.739619   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:57.850664   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
secret "test-secret" deleted
I1109 00:57:57.937910   54666 namespace_controller.go:185] Namespace has been deleted kube-node-lease
namespace "test-secrets" deleted
I1109 00:57:57.952149   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260916-14046
I1109 00:57:57.964449   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260938-24940
I1109 00:57:57.967331   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260918-3791
I1109 00:57:57.967338   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260939-14506
I1109 00:57:57.980436   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260934-7696
I1109 00:57:57.980821   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260923-363
I1109 00:57:57.980873   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260934-22202
I1109 00:57:57.984831   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260940-20667
E1109 00:57:57.992983   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:58.016117   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260930-15603
I1109 00:57:58.144292   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260950-10225
I1109 00:57:58.183660   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260967-19938
I1109 00:57:58.183735   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260964-28490
I1109 00:57:58.187030   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260951-23678
I1109 00:57:58.192416   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573260970-31028
... skipping 13 lines ...
I1109 00:57:58.503851   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261014-690
I1109 00:57:58.507075   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261017-29833
I1109 00:57:58.587748   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261021-16618
I1109 00:57:58.593458   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261019-890
I1109 00:57:58.612409   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261023-28848
I1109 00:57:58.624167   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261050-32133
E1109 00:57:58.640455   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:58.682472   54666 namespace_controller.go:185] Namespace has been deleted namespace-1573261050-31860
E1109 00:57:58.741143   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:58.852265   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:58.994604   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:57:59.340556   54666 namespace_controller.go:185] Namespace has been deleted other
E1109 00:57:59.642422   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:59.742734   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:59.853899   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:57:59.996310   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:00.644096   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:00.744301   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:00.855527   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:00.997673   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:01.645687   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:01.745869   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:01.856925   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:01.999464   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:02.647203   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:02.747605   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:02.858635   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:03.001329   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_configmap_tests
Running command: run_configmap_tests

+++ Running case: test-cmd.run_configmap_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_configmap_tests
+++ [1109 00:58:03] Creating namespace namespace-1573261083-29764
namespace/namespace-1573261083-29764 created
Context "test" modified.
+++ [1109 00:58:03] Testing configmaps
configmap/test-configmap created
E1109 00:58:03.648812   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:28: Successful get configmap/test-configmap {{.metadata.name}}: test-configmap
E1109 00:58:03.748904   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
configmap "test-configmap" deleted
E1109 00:58:03.860412   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:33: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-configmaps\" }}found{{end}}{{end}}:: :
namespace/test-configmaps created
E1109 00:58:04.002436   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:37: Successful get namespaces/test-configmaps {{.metadata.name}}: test-configmaps
core.sh:41: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-configmap\" }}found{{end}}{{end}}:: :
core.sh:42: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-binary-configmap\" }}found{{end}}{{end}}:: :
configmap/test-configmap created
configmap/test-binary-configmap created
core.sh:48: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
core.sh:49: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
E1109 00:58:04.650137   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
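The configmap checks above cover both a literal-backed and a binary-data configmap; roughly (keys, values and the binary file path are placeholders):
kubectl create configmap test-configmap --from-literal=key1=config1 --namespace=test-configmaps
kubectl create configmap test-binary-configmap --from-file=payload=./some-binary-file --namespace=test-configmaps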
E1109 00:58:04.750305   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:04.862094   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
configmap "test-configmap" deleted
configmap "test-binary-configmap" deleted
E1109 00:58:05.003500   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace "test-configmaps" deleted
E1109 00:58:05.651529   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:05.751701   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:05.863599   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:06.005052   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:06.653076   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:06.753292   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:06.865145   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:07.006038   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:07.654675   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:07.754828   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:07.866637   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:08.007367   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:58:08.070540   54666 namespace_controller.go:185] Namespace has been deleted test-secrets
E1109 00:58:08.656534   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:08.756413   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:08.868077   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:09.008908   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:09.658024   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:09.757903   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:09.869561   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:10.010502   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_client_config_tests
Running command: run_client_config_tests

+++ Running case: test-cmd.run_client_config_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_client_config_tests
+++ [1109 00:58:10] Creating namespace namespace-1573261090-8378
namespace/namespace-1573261090-8378 created
Context "test" modified.
+++ [1109 00:58:10] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
E1109 00:58:10.659666   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
E1109 00:58:10.759428   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
E1109 00:58:10.871122   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
E1109 00:58:11.012246   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
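The client-config checks above exercise kubectl's standard kubeconfig selection flags against configuration that does not exist. As a minimal sketch (the subcommand and names are illustrative, not the literal test-cmd invocations), the same error strings can be reproduced by hand:

# Each command points kubectl at client configuration that is missing,
# producing the error messages matched by run_client_config_tests.
kubectl get pods --kubeconfig=missing            # error: stat missing: no such file or directory
kubectl get pods --context=missing-context       # context was not found for specified context
kubectl get pods --cluster=missing-cluster       # no server found for cluster "missing-cluster"
kubectl get pods --user=missing-user             # auth info "missing-user" does not exist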
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests

+++ Running case: test-cmd.run_service_accounts_tests 
... skipping 2 lines ...
+++ [1109 00:58:11] Creating namespace namespace-1573261091-24762
namespace/namespace-1573261091-24762 created
Context "test" modified.
+++ [1109 00:58:11] Testing service accounts
core.sh:828: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-service-accounts\" }}found{{end}}{{end}}:: :
namespace/test-service-accounts created
E1109 00:58:11.660958   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:832: Successful get namespaces/test-service-accounts {{.metadata.name}}: test-service-accounts
serviceaccount/test-service-account created
E1109 00:58:11.760938   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:838: Successful get serviceaccount/test-service-account --namespace=test-service-accounts {{.metadata.name}}: test-service-account
E1109 00:58:11.872590   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
serviceaccount "test-service-account" deleted
namespace "test-service-accounts" deleted
E1109 00:58:12.013646   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
... skipping 12 lines ...
I1109 00:58:15.171011   54666 namespace_controller.go:185] Namespace has been deleted test-configmaps
E1109 00:58:15.667260   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
... skipping 7 lines ...
+++ exit code: 0
Recording: run_job_tests
Running command: run_job_tests

+++ Running case: test-cmd.run_job_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 2 lines ...
namespace/namespace-1573261097-26898 created
Context "test" modified.
+++ [1109 00:58:17] Testing job
batch.sh:30: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-jobs\" }}found{{end}}{{end}}:: :
namespace/test-jobs created
batch.sh:34: Successful get namespaces/test-jobs {{.metadata.name}}: test-jobs
E1109 00:58:17.669824   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
cronjob.batch/pi created
E1109 00:58:17.770138   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
batch.sh:39: Successful get cronjob/pi --namespace=test-jobs {{.metadata.name}}: pi
E1109 00:58:17.881927   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
NAME   SCHEDULE       SUSPEND   ACTIVE   LAST SCHEDULE   AGE
pi     59 23 31 2 *   False     0        <none>          0s
E1109 00:58:18.023255   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Name:                          pi
Namespace:                     test-jobs
Labels:                        run=pi
Annotations:                   <none>
Schedule:                      59 23 31 2 *
Concurrency Policy:            Allow
Suspend:                       False
Successful Job History Limit:  3
Failed Job History Limit:      1
Starting Deadline Seconds:     <unset>
Selector:                      <unset>
Parallelism:                   <unset>
Completions:                   <unset>
Pod Template:
  Labels:  run=pi
... skipping 33 lines ...
                run=pi
Annotations:    cronjob.kubernetes.io/instantiate: manual
Controlled By:  CronJob/pi
Parallelism:    1
Completions:    1
Start Time:     Sat, 09 Nov 2019 00:58:18 +0000
Pods Statuses:  1 Running / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  controller-uid=1ef01960-3d66-4657-a6a2-7fee2f3ea251
           job-name=test-job
           run=pi
  Containers:
   pi:
... skipping 12 lines ...
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From            Message
  ----    ------            ----  ----            -------
  Normal  SuccessfulCreate  0s    job-controller  Created pod: test-job-bmn5r
E1109 00:58:18.671361   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
job.batch "test-job" deleted
E1109 00:58:18.774675   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
cronjob.batch "pi" deleted
E1109 00:58:18.883781   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace "test-jobs" deleted
E1109 00:58:19.024998   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:19.672876   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
... skipping 11 lines ...
I1109 00:58:22.117718   54666 namespace_controller.go:185] Namespace has been deleted test-service-accounts
E1109 00:58:22.677436   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
... skipping 7 lines ...
+++ exit code: 0
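The Job described above (Controlled By: CronJob/pi, annotation cronjob.kubernetes.io/instantiate: manual) has the shape produced by instantiating a Job from an existing CronJob. A minimal sketch, assuming the pi CronJob created earlier in this case still exists in the test-jobs namespace; these are illustrative commands, not the literal test-cmd invocations:

# Create a Job from the CronJob's job template, then inspect its owner.
kubectl create job test-job --namespace=test-jobs --from=cronjob/pi
kubectl describe job test-job --namespace=test-jobs   # shows Controlled By: CronJob/pi
kubectl delete job test-job --namespace=test-jobs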
Recording: run_create_job_tests
Running command: run_create_job_tests

+++ Running case: test-cmd.run_create_job_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 4 lines ...
I1109 00:58:24.369953   54666 event.go:281] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1573261104-27808", Name:"test-job", UID:"25c1b9cb-b4eb-416d-9cd1-e456c1e1a9ed", APIVersion:"batch/v1", ResourceVersion:"1513", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-dxggf
job.batch/test-job created
create.sh:86: Successful get job test-job {{(index .spec.template.spec.containers 0).image}}: k8s.gcr.io/nginx:test-cmd
(Bjob.batch "test-job" deleted
I1109 00:58:24.630804   54666 event.go:281] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1573261104-27808", Name:"test-job-pi", UID:"2d8d34d6-c708-4e85-a1a0-0a5fa38cf8fc", APIVersion:"batch/v1", ResourceVersion:"1520", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: test-job-pi-wxlzz
job.batch/test-job-pi created
E1109 00:58:24.680512   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
create.sh:92: Successful get job test-job-pi {{(index .spec.template.spec.containers 0).image}}: k8s.gcr.io/perl
(BE1109 00:58:24.784061   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
job.batch "test-job-pi" deleted
kubectl run --generator=cronjob/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
E1109 00:58:24.893169   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
cronjob.batch/test-pi created
I1109 00:58:24.989877   54666 event.go:281] Event(v1.ObjectReference{Kind:"Job", Namespace:"namespace-1573261104-27808", Name:"my-pi", UID:"78396408-46a3-4dcb-8334-9bff22e96810", APIVersion:"batch/v1", ResourceVersion:"1528", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: my-pi-479zh
job.batch/my-pi created
E1109 00:58:25.033708   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:[perl -Mbignum=bpi -wle print bpi(10)]
has:perl -Mbignum=bpi -wle print bpi(10)
job.batch "my-pi" deleted
cronjob.batch "test-pi" deleted
+++ exit code: 0
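The deprecation warnings above steer away from kubectl run --generator=cronjob/v1beta1 toward kubectl create. A minimal sketch of the suggested replacement, reusing the image and command seen in this case and borrowing the schedule from the pi CronJob earlier in the log; it is illustrative, not the exact test command:

# Create the same CronJob without the deprecated run generator.
kubectl create cronjob test-pi --schedule='59 23 31 2 *' \
  --image=k8s.gcr.io/perl -- perl -Mbignum=bpi -wle 'print bpi(10)'
kubectl delete cronjob test-pi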
... skipping 5 lines ...
+++ command: run_pod_templates_tests
+++ [1109 00:58:25] Creating namespace namespace-1573261105-13669
namespace/namespace-1573261105-13669 created
Context "test" modified.
+++ [1109 00:58:25] Testing pod templates
core.sh:1415: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: 
E1109 00:58:25.682397   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:25.785407   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:58:25.834961   51227 controller.go:606] quota admission added evaluator for: podtemplates
podtemplate/nginx created
E1109 00:58:25.894798   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1419: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
NAME    CONTAINERS   IMAGES   POD LABELS
nginx   nginx        nginx    name=nginx
E1109 00:58:26.034994   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1427: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx:
podtemplate "nginx" deleted
core.sh:1431: Successful get podtemplate {{range.items}}{{.metadata.name}}:{{end}}: 
+++ exit code: 0
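The core.sh assertions in this case (and throughout the log) query state with a Go template and compare the rendered string against an expected value. A minimal sketch of the same check run by hand, assuming a reachable test cluster:

# Renders every PodTemplate name followed by ':'.
# Expected output: empty before creation, "nginx:" while the nginx template exists.
kubectl get podtemplates -o go-template='{{range.items}}{{.metadata.name}}:{{end}}'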
Recording: run_service_tests
Running command: run_service_tests

+++ Running case: test-cmd.run_service_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_service_tests
Context "test" modified.
+++ [1109 00:58:26] Testing kubectl(v1:services)
E1109 00:58:26.683945   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:858: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
E1109 00:58:26.786595   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:26.896374   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/redis-master created
core.sh:862: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
E1109 00:58:27.036578   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
matched Name:
matched Labels:
matched Selector:
matched IP:
matched Port:
matched Endpoints:
... skipping 94 lines ...
IP:                10.0.0.200
Port:              <unset>  6379/TCP
TargetPort:        6379/TCP
Endpoints:         <none>
Session Affinity:  None
Events:            <none>
E1109 00:58:27.684942   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful describe
Name:              kubernetes
Namespace:         default
Labels:            component=apiserver
                   provider=kubernetes
Annotations:       <none>
... skipping 18 lines ...
IP:                10.0.0.200
Port:              <unset>  6379/TCP
TargetPort:        6379/TCP
Endpoints:         <none>
Session Affinity:  None
Events:            <none>
E1109 00:58:27.787894   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful describe
Name:              kubernetes
Namespace:         default
Labels:            component=apiserver
                   provider=kubernetes
Annotations:       <none>
... skipping 16 lines ...
Type:              ClusterIP
IP:                10.0.0.200
Port:              <unset>  6379/TCP
TargetPort:        6379/TCP
Endpoints:         <none>
Session Affinity:  None
E1109 00:58:27.897565   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful describe
Name:              kubernetes
Namespace:         default
Labels:            component=apiserver
                   provider=kubernetes
Annotations:       <none>
... skipping 19 lines ...
Port:              <unset>  6379/TCP
TargetPort:        6379/TCP
Endpoints:         <none>
Session Affinity:  None
Events:            <none>
core.sh:882: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
E1109 00:58:28.038140   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: redis
... skipping 59 lines ...
  selector:
    role: padawan
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
E1109 00:58:28.686538   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
E1109 00:58:28.789498   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:898: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
E1109 00:58:28.898895   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1109 00:58:29.038025   54666 namespace_controller.go:185] Namespace has been deleted test-jobs
E1109 00:58:29.039356   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:905: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
(Bservice "redis-master" deleted
core.sh:912: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
(Bcore.sh:916: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
(Bservice/redis-master created
core.sh:920: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
E1109 00:58:29.688026   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:924: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
E1109 00:58:29.791001   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:29.900252   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/service-v1-test created
core.sh:945: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
E1109 00:58:30.040934   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/service-v1-test replaced
core.sh:952: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
(Bservice "redis-master" deleted
service "service-v1-test" deleted
core.sh:960: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
E1109 00:58:30.689612   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:964: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
E1109 00:58:30.792695   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:30.901444   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/redis-master created
E1109 00:58:31.042513   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/redis-slave created
core.sh:969: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
Successful
message:NAME           RSRC
kubernetes     143
redis-master   1566
redis-slave    1569
has:redis-master
core.sh:979: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
(Bservice "redis-master" deleted
service "redis-slave" deleted
core.sh:986: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
E1109 00:58:31.690969   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:990: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
service/beep-boop created
E1109 00:58:31.794206   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:994: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
E1109 00:58:31.902997   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:998: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes:
E1109 00:58:32.043768   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service "beep-boop" deleted
core.sh:1005: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
core.sh:1009: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
(Bkubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
I1109 00:58:32.345753   54666 event.go:281] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"testmetadata", UID:"2b0b148d-c5db-4b86-83a7-375d69cc7e0b", APIVersion:"apps/v1", ResourceVersion:"1583", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set testmetadata-bd968f46 to 2
I1109 00:58:32.352604   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-bd968f46", UID:"f3c53315-9880-4f58-a9d0-9c5924884ed3", APIVersion:"apps/v1", ResourceVersion:"1584", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-bd968f46-trfnb
I1109 00:58:32.356268   54666 event.go:281] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"testmetadata-bd968f46", UID:"f3c53315-9880-4f58-a9d0-9c5924884ed3", APIVersion:"apps/v1", ResourceVersion:"1584", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: testmetadata-bd968f46-mmpxw
service/testmetadata created
deployment.apps/testmetadata created
core.sh:1013: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: testmetadata:
core.sh:1014: Successful get service testmetadata {{.metadata.annotations}}: map[zone-context:home]
(Bservice/exposemetadata exposed
E1109 00:58:32.692109   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1020: Successful get service exposemetadata {{.metadata.annotations}}: map[zone-context:work]
E1109 00:58:32.795589   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service "exposemetadata" deleted
service "testmetadata" deleted
E1109 00:58:32.904390   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps "testmetadata" deleted
+++ exit code: 0
Recording: run_daemonset_tests
Running command: run_daemonset_tests
E1109 00:58:33.045090   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource

+++ Running case: test-cmd.run_daemonset_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_daemonset_tests
+++ [1109 00:58:33] Creating namespace namespace-1573261113-15527
namespace/namespace-1573261113-15527 created
Context "test" modified.
+++ [1109 00:58:33] Testing kubectl(v1:daemonsets)
apps.sh:30: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
I1109 00:58:33.498273   51227 controller.go:606] quota admission added evaluator for: daemonsets.apps
daemonset.apps/bind created
I1109 00:58:33.509254   51227 controller.go:606] quota admission added evaluator for: controllerrevisions.apps
apps.sh:34: Successful get daemonsets bind {{.metadata.generation}}: 1
E1109 00:58:33.693558   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
daemonset.apps/bind configured
E1109 00:58:33.796981   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:37: Successful get daemonsets bind {{.metadata.generation}}: 1
E1109 00:58:33.906025   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
daemonset.apps/bind image updated
E1109 00:58:34.046529   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:40: Successful get daemonsets bind {{.metadata.generation}}: 2
daemonset.apps/bind env updated
apps.sh:42: Successful get daemonsets bind {{.metadata.generation}}: 3
daemonset.apps/bind resource requirements updated
apps.sh:44: Successful get daemonsets bind {{.metadata.generation}}: 4
daemonset.apps/bind restarted
apps.sh:48: Successful get daemonsets bind {{.metadata.generation}}: 5
E1109 00:58:34.695000   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
daemonset.apps "bind" deleted
+++ exit code: 0
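Each "updated"/"restarted" step above bumps the DaemonSet's .metadata.generation from 1 through 5. A minimal sketch of equivalent kubectl invocations; the namespace and DaemonSet name come from the log, while the image, env value, and resource limits are illustrative rather than the exact test-cmd arguments:

# Mutations that each produce a new generation of daemonset/bind.
NS=namespace-1573261113-15527
kubectl -n "$NS" set image daemonset/bind '*=k8s.gcr.io/pause:latest'
kubectl -n "$NS" set env daemonset/bind DEMO=1
kubectl -n "$NS" set resources daemonset/bind --limits=cpu=200m,memory=512Mi
kubectl -n "$NS" rollout restart daemonset/bind
kubectl -n "$NS" get daemonset bind -o go-template='{{.metadata.generation}}'   # increments after each change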
E1109 00:58:34.798384   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Recording: run_daemonset_history_tests
Running command: run_daemonset_history_tests

+++ Running case: test-cmd.run_daemonset_history_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_daemonset_history_tests
+++ [1109 00:58:34] Creating namespace namespace-1573261114-15404
E1109 00:58:34.907542   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace/namespace-1573261114-15404 created
Context "test" modified.
+++ [1109 00:58:35] Testing kubectl(v1:daemonsets, v1:controllerrevisions)
E1109 00:58:35.047804   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:66: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}: 
daemonset.apps/bind created
apps.sh:70: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1573261114-15404"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
 kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:
daemonset.apps/bind skipped rollback (current template already matches revision 1)
apps.sh:73: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0:
E1109 00:58:35.696440   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:74: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
E1109 00:58:35.799774   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1109 00:58:35.909511   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
daemonset.apps/bind configured
apps.sh:77: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest:
E1109 00:58:36.049255   54666 reflector.go:156] k8s.io/client-go/metadata/metadatainformer/informer.go:89: Failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:78: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd:
apps.sh:79: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
apps.sh:80: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1573261114-15404"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}}
 kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true]:map[deprecated.daemonset.template.generation:2 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=http://127.0.0.1:8080 --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1573261114-15404"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"