PR       | adtac: optimise defaultpreemption: enumerate fewer candidates
Result   | FAILURE
Tests    | 0 failed / 134 succeeded
Started  |
Elapsed  | 25m34s
Revision | 7e027a445f209a7664b8bd3e1c1d6fbfa491e53b
Refs     | 94814
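To reproduce this run locally, the PR head and the exact revision in the table above can be checked out first. A minimal sketch, assuming a fresh clone of kubernetes/kubernetes and GitHub's pull/<N>/head ref convention; only the PR number (94814) and the revision hash are taken from this report:

  # Fetch the head of PR #94814 and check out the revision this job tested.
  git clone https://github.com/kubernetes/kubernetes && cd kubernetes
  git fetch origin pull/94814/head:pr-94814
  git checkout 7e027a445f209a7664b8bd3e1c1d6fbfa491e53b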
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/fails_on_all
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitCertPhaseCSR/generate_CSR
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_v1alpha3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta1
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta2
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_IPv6DualStack=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
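The kubeadm cases listed above are ordinary Go tests in the k8s.io/kubernetes/cmd/kubeadm/test/cmd package, so a single case can be rerun with go test's -run filter. A minimal sketch, assuming a built kubeadm binary is available to the test package the way the CI job provides it (the job builds cmd/kubeadm before running these tests, as shown in the log below):

  # Re-run just the TestCmdVersion cases from the list above, verbosely.
  go test k8s.io/kubernetes/cmd/kubeadm/test/cmd -run 'TestCmdVersion' -v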
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
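The run_* entries above are bash functions from the test-cmd harness (test/cmd/legacy-script.sh, sourced by hack/make-rules/test-cmd.sh; both paths appear in the call tree in the log below). A minimal sketch of driving the suite the same way this job does, from the repository root; the make target name is an assumption based on the script path:

  # Run the full test-cmd suite as the CI job does.
  make test-cmd
  # Or invoke the underlying script directly (path taken from the call tree below).
  hack/make-rules/test-cmd.sh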
... skipping 61 lines ... Recording: record_command_canary Running command: record_command_canary +++ Running case: test-cmd.record_command_canary +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: record_command_canary /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 155: bogus-expected-to-fail: command not found !!! [1104 19:21:52] Call tree: !!! [1104 19:21:52] 1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...) !!! [1104 19:21:52] 2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...) !!! [1104 19:21:52] 3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:131 juLog(...) !!! [1104 19:21:52] 4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:159 record_command(...) !!! [1104 19:21:52] 5: hack/make-rules/test-cmd.sh:35 source(...) +++ exit code: 1 +++ error: 1 +++ [1104 19:21:52] Running kubeadm tests +++ [1104 19:21:57] Building go targets for linux/amd64: cmd/kubeadm +++ [1104 19:22:38] Running tests without code coverage {"Time":"2020-11-04T19:24:03.552733059Z","Action":"output","Package":"k8s.io/kubernetes/cmd/kubeadm/test/cmd","Output":"ok \tk8s.io/kubernetes/cmd/kubeadm/test/cmd\t51.933s\n"} ✓ cmd/kubeadm/test/cmd (51.936s) ... skipping 332 lines ... +++ [1104 19:25:43] Building kube-controller-manager +++ [1104 19:25:47] Building go targets for linux/amd64: cmd/kube-controller-manager +++ [1104 19:26:12] Generate kubeconfig for controller-manager +++ [1104 19:26:12] Starting controller-manager I1104 19:26:12.779200 58014 serving.go:331] Generated self-signed cert in-memory W1104 19:26:13.376790 58014 authentication.go:396] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory W1104 19:26:13.376859 58014 authentication.go:293] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. W1104 19:26:13.376869 58014 authentication.go:317] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. W1104 19:26:13.376893 58014 authorization.go:205] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory W1104 19:26:13.376914 58014 authorization.go:173] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. I1104 19:26:13.376946 58014 controllermanager.go:174] Version: v1.20.0-beta.1.57+3dc212d0c6bd93-dirty I1104 19:26:13.378389 58014 secure_serving.go:197] Serving securely on [::]:10257 I1104 19:26:13.378429 58014 tlsconfig.go:240] Starting DynamicServingCertificateController I1104 19:26:13.379296 58014 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 I1104 19:26:13.379365 58014 leaderelection.go:243] attempting to acquire leader lease kube-system/kube-controller-manager... ... skipping 63 lines ... I1104 19:26:14.128312 58014 controllermanager.go:548] Started "csrcleaner" I1104 19:26:14.128515 58014 node_lifecycle_controller.go:380] Sending events to api server. W1104 19:26:14.128750 58014 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. 
I1104 19:26:14.128791 58014 taint_manager.go:163] Sending events to api server. I1104 19:26:14.128876 58014 node_lifecycle_controller.go:508] Controller will reconcile labels. I1104 19:26:14.128912 58014 controllermanager.go:548] Started "nodelifecycle" E1104 19:26:14.129209 58014 core.go:91] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail W1104 19:26:14.129232 58014 controllermanager.go:540] Skipping "service" I1104 19:26:14.129242 58014 core.go:241] Will not configure cloud provider routes for allocate-node-cidrs: false, configure-cloud-routes: true. W1104 19:26:14.129249 58014 controllermanager.go:540] Skipping "route" I1104 19:26:14.129554 58014 controllermanager.go:548] Started "pvc-protection" I1104 19:26:14.129913 58014 controllermanager.go:548] Started "garbagecollector" I1104 19:26:14.130225 58014 controllermanager.go:548] Started "deployment" I1104 19:26:14.130500 58014 controllermanager.go:548] Started "replicaset" I1104 19:26:14.130693 58014 node_lifecycle_controller.go:77] Sending events to api server E1104 19:26:14.130717 58014 core.go:231] failed to start cloud node lifecycle controller: no cloud provider provided W1104 19:26:14.130725 58014 controllermanager.go:540] Skipping "cloud-node-lifecycle" I1104 19:26:14.131099 58014 controllermanager.go:548] Started "endpointslicemirroring" I1104 19:26:14.132142 58014 pvc_protection_controller.go:110] Starting PVC protection controller I1104 19:26:14.132170 58014 shared_informer.go:240] Waiting for caches to sync for PVC protection I1104 19:26:14.132197 58014 garbagecollector.go:128] Starting garbage collector controller I1104 19:26:14.132203 58014 shared_informer.go:240] Waiting for caches to sync for garbage collector ... skipping 75 lines ... I1104 19:26:14.152890 58014 shared_informer.go:240] Waiting for caches to sync for daemon sets W1104 19:26:14.153259 58014 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. 
I1104 19:26:14.153283 58014 controllermanager.go:548] Started "persistentvolume-binder" W1104 19:26:14.153291 58014 controllermanager.go:540] Skipping "root-ca-cert-publisher" I1104 19:26:14.153358 58014 pv_controller_base.go:307] Starting persistent volume controller I1104 19:26:14.153374 58014 shared_informer.go:240] Waiting for caches to sync for persistent volume W1104 19:26:14.179642 58014 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist I1104 19:26:14.245331 58014 shared_informer.go:247] Caches are synced for namespace I1104 19:26:14.250006 58014 shared_informer.go:247] Caches are synced for PV protection I1104 19:26:14.251816 58014 shared_informer.go:247] Caches are synced for certificate-csrapproving I1104 19:26:14.252031 58014 shared_informer.go:247] Caches are synced for ClusterRoleAggregator I1104 19:26:14.252268 58014 shared_informer.go:247] Caches are synced for TTL E1104 19:26:14.264101 58014 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again E1104 19:26:14.265138 58014 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocated ip:10.0.0.1 with error:provided IP is already allocated E1104 19:26:14.275186 58014 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again I1104 19:26:14.349613 58014 shared_informer.go:247] Caches are synced for expand I1104 19:26:14.351194 58014 shared_informer.go:247] Caches are synced for service account I1104 19:26:14.354136 54408 controller.go:606] quota admission added evaluator for: serviceaccounts NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 32s Recording: run_kubectl_version_tests ... skipping 138 lines ... +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_RESTMapper_evaluation_tests +++ [1104 19:26:19] Creating namespace namespace-1604517979-14593 namespace/namespace-1604517979-14593 created Context "test" modified. +++ [1104 19:26:19] Testing RESTMapper +++ [1104 19:26:19] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype" +++ exit code: 0 NAME SHORTNAMES APIVERSION NAMESPACED KIND bindings v1 true Binding componentstatuses cs v1 false ComponentStatus configmaps cm v1 true ConfigMap endpoints ep v1 true Endpoints ... skipping 62 lines ... namespace/namespace-1604517983-6052 created Context "test" modified. 
+++ [1104 19:26:24] Testing clusterroles [32mrbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin (B[m[32mrbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin (B[mSuccessful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run) clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "pod-admin" deleted ... skipping 18 lines ... (B[mclusterrole.rbac.authorization.k8s.io/url-reader created [32mrbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get: (B[m[32mrbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*: (B[mclusterrole.rbac.authorization.k8s.io/aggregation-reader created [32mrbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader (B[mSuccessful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run) Successful message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created [32mrbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: (B[mclusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run) [32mrbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: ... skipping 61 lines ... 
[32mrbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user: (B[m[32mrbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user: (B[m[32mrbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user: (B[mrolebinding.rbac.authorization.k8s.io/admin created (dry run) rolebinding.rbac.authorization.k8s.io/admin created (server dry run) Successful message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found has: not found rolebinding.rbac.authorization.k8s.io/admin created [32mrbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole (B[m[32mrbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin: (B[mrolebinding.rbac.authorization.k8s.io/admin subjects updated [32mrbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo: ... skipping 29 lines ... message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1604517991-1438 namespace. has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1604517991-1438 namespace. Error: 1 warning received has:Role is deprecated Successful message:Warning: rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role No resources found in namespace-1604517991-1438 namespace. Error: 1 warning received has:Error: 1 warning received role.rbac.authorization.k8s.io/pod-admin created (dry run) role.rbac.authorization.k8s.io/pod-admin created (server dry run) Successful message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found has: not found role.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:163: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[m[32mrbac.sh:164: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods: (B[m[32mrbac.sh:165: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: : (B[mSuccessful ... skipping 460 lines ... has:valid-pod Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:valid-pod [32mcore.sh:190: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: resource(s) were provided, but no name, label selector, or --all flag specified [32mcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: setting 'all' parameter but found a non empty selector. [32mcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
pod "valid-pod" force deleted [32mcore.sh:210: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:215: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: : ... skipping 19 lines ... (B[mpoddisruptionbudget.policy/test-pdb-2 created [32mcore.sh:259: Successful get pdb/test-pdb-2 --namespace=test-kubectl-describe-pod {{.spec.minAvailable}}: 50% (B[mpoddisruptionbudget.policy/test-pdb-3 created [32mcore.sh:265: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2 (B[mpoddisruptionbudget.policy/test-pdb-4 created [32mcore.sh:269: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50% (B[merror: min-available and max-unavailable cannot be both specified [32mcore.sh:275: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/env-test-pod created matched TEST_CMD_1 matched <set to the key 'key-1' in secret 'test-secret'> matched TEST_CMD_2 matched <set to the key 'key-2' of config map 'test-configmap'> ... skipping 221 lines ... [32mcore.sh:534: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:3.2: (B[mSuccessful message:kubectl-create kubectl-patch has:kubectl-patch pod/valid-pod patched [32mcore.sh:554: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx: (B[m+++ [1104 19:27:04] "kubectl patch with resourceVersion 537" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again I1104 19:27:04.528060 54408 client.go:360] parsed scheme: "passthrough" I1104 19:27:04.528122 54408 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I1104 19:27:04.528133 54408 clientconn.go:948] ClientConn switching balancer to "pick_first" pod "valid-pod" deleted pod/valid-pod replaced [32mcore.sh:578: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname (B[mSuccessful message:kubectl-create kubectl-patch kubectl-replace has:kubectl-replace Successful message:error: --grace-period must have --force specified has:\-\-grace-period must have \-\-force specified Successful message:error: --timeout must have --force specified has:\-\-timeout must have \-\-force specified W1104 19:27:05.534961 58014 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist node/node-v1-test created [32mcore.sh:606: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced (server dry run) node/node-v1-test replaced (dry run) [32mcore.sh:631: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced ... skipping 30 lines ... 
spec: containers: - image: k8s.gcr.io/pause:2.0 name: kubernetes-pause has:localonlyvalue [32mcore.sh:683: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[merror: 'name' already has a value (valid-pod), and --overwrite is false [32mcore.sh:687: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[m[32mcore.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[mpod/valid-pod labeled [32mcore.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan (B[m[32mcore.sh:699: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mwarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ... skipping 83 lines ... +++ Running case: test-cmd.run_kubectl_create_error_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_create_error_tests +++ [1104 19:27:17] Creating namespace namespace-1604518037-13144 namespace/namespace-1604518037-13144 created Context "test" modified. +++ [1104 19:27:17] Testing kubectl create with error Error: must specify one of -f and -k Create a resource from a file or from stdin. JSON and YAML formats are accepted. Examples: ... skipping 43 lines ... Usage: kubectl create -f FILENAME [options] Use "kubectl <command> --help" for more information about a given command. Use "kubectl options" for a list of global command-line options (applies to all commands). +++ [1104 19:27:17] "kubectl create with empty string list returns error as expected: error: error validating "hack/testdata/invalid-rc-with-empty-args.yaml": error validating data: ValidationError(ReplicationController.spec.template.spec.containers[0].args): unknown object type "nil" in ReplicationController.spec.template.spec.containers[0].args[0]; if you choose to ignore these errors, turn validation off with --validate=false kubectl convert is DEPRECATED and will be removed in a future version. In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version. +++ exit code: 0 Recording: run_kubectl_apply_tests Running command: run_kubectl_apply_tests ... skipping 31 lines ... I1104 19:27:20.922714 58014 event.go:291] "Event occurred" object="namespace-1604518037-17430/test-deployment-retainkeys-8695b756f8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-8695b756f8-8blvk" deployment.apps "test-deployment-retainkeys" deleted [32mapply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mapply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted [32mapply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mW1104 19:27:22.118293 66474 helpers.go:567] --dry-run=true is deprecated (boolean value) and can be replaced with --dry-run=client. pod/test-pod created (dry run) pod/test-pod created (dry run) ... skipping 34 lines ... 
(B[mpod/b created [32mapply.sh:196: Successful get pods a {{.metadata.name}}: a (B[m[32mapply.sh:197: Successful get pods b -n nsb {{.metadata.name}}: b (B[mpod "a" deleted pod "b" deleted Successful message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector has:all resources selected for prune without explicitly passing --all pod/a created pod/b created service/prune-svc created Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress I1104 19:27:31.528511 58014 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1604518034-15958 ... skipping 44 lines ... (B[mpod/b unchanged pod/a pruned Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress [32mapply.sh:254: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b: (B[mnamespace "nsb" deleted Successful message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation. has:the namespace from the provided object "nsb" does not match the namespace "foo". [32mapply.sh:265: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/a created [32mapply.sh:269: Successful get services a {{.metadata.name}}: a (B[mSuccessful message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set ... skipping 25 lines ... (B[m[32mapply.sh:291: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment (B[m[32mapply.sh:292: Successful get service test-the-service {{.metadata.name}}: test-the-service (B[mconfigmap "test-the-map" deleted service "test-the-service" deleted deployment.apps "test-the-deployment" deleted Successful message:Error from server (NotFound): namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found [32mapply.sh:300: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:namespace/multi-resource-ns created Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found Successful message:Error from server (NotFound): pods "test-pod" not found has:pods "test-pod" not found pod/test-pod created namespace/multi-resource-ns unchanged [32mapply.sh:308: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod (B[mpod "test-pod" deleted namespace "multi-resource-ns" deleted I1104 19:27:58.802021 58014 namespace_controller.go:185] Namespace has been deleted nsb [32mapply.sh:314: Successful get configmaps {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:configmap/foo created error: unable to recognize "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1" has:no matches for kind "Bogus" in version "example.com/v1" [32mapply.sh:320: Successful get configmaps foo {{.metadata.name}}: foo (B[mconfigmap "foo" deleted [32mapply.sh:326: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:pod/pod-a created ... skipping 6 lines ... 
pod "pod-c" deleted [32mapply.sh:334: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapply.sh:338: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com created error: unable to recognize "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1" has:no matches for kind "Widget" in version "example.com/v1" I1104 19:28:04.542593 54408 client.go:360] parsed scheme: "endpoint" I1104 19:28:04.542647 54408 endpoint.go:68] ccResolverWrapper: sending new addresses to cc: [{http://127.0.0.1:2379 <nil> 0 <nil>}] Successful message:Error from server (NotFound): widgets.example.com "foo" not found has:widgets.example.com "foo" not found [32mapply.sh:344: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com (B[mI1104 19:28:04.863980 54408 controller.go:606] quota admission added evaluator for: widgets.example.com widget.example.com/foo created Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged ... skipping 34 lines ... message:803 has:803 pod "test-pod" deleted [32mapply.sh:403: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ [1104 19:28:07] Testing upgrade kubectl client-side apply to server-side apply pod/test-pod created error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name Please review the fields above--they currently have other managers. Here are the ways you can resolve this warning: * If you intend to manage all of these fields, please re-run the apply command with the `--force-conflicts` flag. * If you do not intend to manage all of the fields, please edit your manifest to remove references to the fields that should keep their ... skipping 82 lines ... (B[mpod "nginx-extensions" deleted Successful message:pod/test1 created has:pod/test1 created pod "test1" deleted Successful message:error: Invalid image name "InvalidImageName": invalid reference format has:error: Invalid image name "InvalidImageName": invalid reference format +++ exit code: 0 Recording: run_kubectl_create_filter_tests Running command: run_kubectl_create_filter_tests +++ Running case: test-cmd.run_kubectl_create_filter_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 3 lines ... Context "test" modified. +++ [1104 19:28:11] Testing kubectl create filter [32mcreate.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mcreate.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[mSuccessful message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted +++ exit code: 0 Recording: run_kubectl_apply_deployments_tests Running command: run_kubectl_apply_deployments_tests ... skipping 31 lines ... 
I1104 19:28:15.233230 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-9bb9c4878 to 3" I1104 19:28:15.237897 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-57nmq" I1104 19:28:15.244157 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-jwht5" I1104 19:28:15.245386 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-9bb9c4878" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-9bb9c4878-zc246" [32mapps.sh:152: Successful get deployment nginx {{.metadata.name}}: nginx (B[mSuccessful message:Error from server (Conflict): error when applying patch: {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1604518092-14250\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}} to: Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment" Name: "nginx", Namespace: "namespace-1604518092-14250" for: "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again has:Error from server (Conflict) deployment.apps/nginx configured I1104 19:28:23.850573 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6dd6cfdb57 to 3" I1104 19:28:23.855814 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-w5qrb" I1104 19:28:23.861129 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-5qd2j" I1104 19:28:23.861182 58014 event.go:291] "Event occurred" object="namespace-1604518092-14250/nginx-6dd6cfdb57" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6dd6cfdb57-kz79w" Successful ... skipping 300 lines ... +++ [1104 19:28:32] Creating namespace namespace-1604518112-27053 namespace/namespace-1604518112-27053 created Context "test" modified. 
+++ [1104 19:28:33] Testing kubectl get [32mget.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:{ "apiVersion": "v1", "items": [], ... skipping 23 lines ... has not:No resources found Successful message:NAME has not:No resources found [32mget.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:error: the server doesn't have a resource type "foobar" has not:No resources found Successful message:No resources found in namespace-1604518112-27053 namespace. has:No resources found Successful message: has not:No resources found Successful message:No resources found in namespace-1604518112-27053 namespace. has:No resources found [32mget.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:Error from server (NotFound): pods "abc" not found has:pods "abc" not found Successful message:Error from server (NotFound): pods "abc" not found has not:List Successful message:I1104 19:28:34.935265 69971 loader.go:375] Config loaded from file: /tmp/tmp.4q9vP4UlSS/.kube/config I1104 19:28:34.941545 69971 round_trippers.go:444] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 5 milliseconds I1104 19:28:34.966781 69971 round_trippers.go:444] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 2 milliseconds I1104 19:28:34.968928 69971 round_trippers.go:444] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds ... skipping 632 lines ... } [32mget.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m<no value>Successful message:valid-pod: has:valid-pod: Successful message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. 
Printing more information for debugging the template: template was: {.missing} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2020-11-04T19:28:42Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2020-11-04T19:28:42Z"}}, "name":"valid-pod", "namespace":"namespace-1604518122-1287", "resourceVersion":"965", "uid":"fac02ccb-a282-43fd-91b2-3a419c96cb9c"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"k8s.gcr.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}} has:missing is not found error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing" Successful message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". 
Printing more information for debugging the template: template was: {{.missing}} raw data was: {"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2020-11-04T19:28:42Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2020-11-04T19:28:42Z"}],"name":"valid-pod","namespace":"namespace-1604518122-1287","resourceVersion":"965","uid":"fac02ccb-a282-43fd-91b2-3a419c96cb9c"},"spec":{"containers":[{"image":"k8s.gcr.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}} object given to template engine was: map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2020-11-04T19:28:42Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2020-11-04T19:28:42Z]] name:valid-pod namespace:namespace-1604518122-1287 resourceVersion:965 uid:fac02ccb-a282-43fd-91b2-3a419c96cb9c] spec:map[containers:[map[image:k8s.gcr.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]] ... skipping 156 lines ... terminationGracePeriodSeconds: 30 status: phase: Pending qosClass: Guaranteed has:name: valid-pod Successful message:Error from server (NotFound): pods "invalid-pod" not found has:"invalid-pod" not found pod "valid-pod" deleted [32mget.sh:196: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/redis-master created pod/valid-pod created Successful ... skipping 36 lines ... 
+++ [1104 19:28:48] Creating namespace namespace-1604518128-7653 namespace/namespace-1604518128-7653 created Context "test" modified. +++ [1104 19:28:48] Testing kubectl exec POD COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): pods "abc" not found has:pods "abc" not found pod/test-pod created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pods "test-pod" not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod or type/name must be specified pod "test-pod" deleted +++ exit code: 0 Recording: run_kubectl_exec_resource_name_tests Running command: run_kubectl_exec_resource_name_tests ... skipping 3 lines ... +++ [1104 19:28:49] Creating namespace namespace-1604518129-18044 namespace/namespace-1604518129-18044 created Context "test" modified. +++ [1104 19:28:49] Testing kubectl exec TYPE/NAME COMMAND Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: the server doesn't have a resource type "foo" has:error: Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): deployments.apps "bar" not found has:"bar" not found pod/test-pod created replicaset.apps/frontend created I1104 19:28:50.073597 58014 event.go:291] "Event occurred" object="namespace-1604518129-18044/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-8zbxb" I1104 19:28:50.078144 58014 event.go:291] "Event occurred" object="namespace-1604518129-18044/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-fvv77" I1104 19:28:50.078180 58014 event.go:291] "Event occurred" object="namespace-1604518129-18044/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lr86t" configmap/test-set-env-config created Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented has:not implemented Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod, type/name or --filename must be specified Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. 
Error from server (BadRequest): pod frontend-8zbxb does not have a host assigned has not:not found Successful message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod frontend-8zbxb does not have a host assigned has not:pod, type/name or --filename must be specified pod "test-pod" deleted replicaset.apps "frontend" deleted I1104 19:28:50.752279 54408 client.go:360] parsed scheme: "passthrough" I1104 19:28:50.752331 54408 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>} I1104 19:28:50.752340 54408 clientconn.go:948] ClientConn switching balancer to "pick_first" ... skipping 3 lines ... Running command: run_create_secret_tests +++ Running case: test-cmd.run_create_secret_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_create_secret_tests Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful message:user-specified has:user-specified Successful message:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found Successful {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"8fb18977-6083-4a63-b1ab-a2ea14eacc09","resourceVersion":"1042","creationTimestamp":"2020-11-04T19:28:51Z"}} Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"8fb18977-6083-4a63-b1ab-a2ea14eacc09","resourceVersion":"1043","creationTimestamp":"2020-11-04T19:28:51Z"},"data":{"key1":"config1"}} has:uid Successful message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"8fb18977-6083-4a63-b1ab-a2ea14eacc09","resourceVersion":"1043","creationTimestamp":"2020-11-04T19:28:51Z"},"data":{"key1":"config1"}} has:config1 {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"8fb18977-6083-4a63-b1ab-a2ea14eacc09"}} Successful message:Error from server (NotFound): configmaps "tester-update-cm" not found has:configmaps "tester-update-cm" not found +++ exit code: 0 Recording: run_kubectl_create_kustomization_directory_tests Running command: run_kubectl_create_kustomization_directory_tests +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests ... skipping 172 lines ... has:Timeout Successful message:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:valid-pod Successful message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h) has:Invalid timeout value pod "valid-pod" deleted +++ exit code: 0 Recording: run_crd_tests Running command: run_crd_tests ... skipping 240 lines ... 
foo.company.com/test patched [32mcrd.sh:236: Successful get foos/test {{.patched}}: value1 (B[mfoo.company.com/test patched [32mcrd.sh:238: Successful get foos/test {{.patched}}: value2 (B[mfoo.company.com/test patched [32mcrd.sh:240: Successful get foos/test {{.patched}}: <no value> (B[m+++ [1104 19:29:03] "kubectl patch --local" returns error as expected for CustomResource: error: cannot apply strategic merge patch for company.com/v1, Kind=Foo locally, try --type merge { "apiVersion": "company.com/v1", "kind": "Foo", "metadata": { "annotations": { "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true" ... skipping 371 lines ... (B[m[32mcrd.sh:450: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace/non-native-resources created bar.company.com/test created [32mcrd.sh:455: Successful get bars {{len .items}}: 1 (B[mnamespace "non-native-resources" deleted [32mcrd.sh:458: Successful get bars {{len .items}}: 0 (B[mError from server (NotFound): namespaces "non-native-resources" not found customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted +++ exit code: 0 +++ [1104 19:29:30] Testing recursive resources +++ [1104 19:29:30] Creating namespace namespace-1604518170-72 namespace/namespace-1604518170-72 created Context "test" modified. [32mgeneric-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mW1104 19:29:31.290630 54408 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E1104 19:29:31.292227 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:pod/busybox0 created pod/busybox1 created error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW1104 19:29:31.414678 54408 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E1104 19:29:31.416127 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W1104 19:29:31.516224 54408 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E1104 19:29:31.517754 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox: (B[mSuccessful message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing W1104 19:29:31.643700 54408 cacher.go:148] Terminating all watchers from cacher *unstructured.Unstructured E1104 19:29:31.645232 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[mSuccessful message:pod/busybox0 replaced pod/busybox1 replaced error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:Name: busybox0 Namespace: namespace-1604518170-72 Priority: 0 Node: <none> ... skipping 44 lines ... QoS Class: BestEffort Node-Selectors: <none> Tolerations: <none> Events: <none> unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:app=busybox0 E1104 19:29:32.296425 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Successful message:Name: busybox0 Namespace: namespace-1604518170-72 Priority: 0 Node: <none> Labels: app=busybox0 ... skipping 103 lines ... 
has:Object 'Kind' is missing
generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
Successful
message:pod/busybox0 annotated
pod/busybox1 annotated
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
E1104 19:29:32.818159 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1104 19:29:32.876503 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1104 19:29:33.021843 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox0 configured
Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox1 configured error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:265: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx created I1104 19:29:33.349454 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-54785cbcb8 to 3" I1104 19:29:33.353944 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-54785cbcb8-7dkfq" I1104 19:29:33.357604 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-54785cbcb8-jzjf5" I1104 19:29:33.359569 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-54785cbcb8-rmcjp" ... skipping 44 lines ... securityContext: {} terminationGracePeriodSeconds: 30 status: {} has:extensions/v1beta1 deployment.apps "nginx" deleted [32mgeneric-resources.sh:281: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE1104 19:29:34.084961 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:285: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:kubectl convert is DEPRECATED and will be removed in a future version. In order to convert, kubectl apply the object to the cluster, then kubectl get at the desired version. 
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:290: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:busybox0:busybox1: Successful message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:299: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' E1104 19:29:34.584515 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:304: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue: (B[mSuccessful message:pod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:309: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mE1104 19:29:34.762671 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource pod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:314: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox: (B[mSuccessful message:pod/busybox0 patched pod/busybox1 
patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing E1104 19:29:34.928736 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I1104 19:29:34.974127 58014 namespace_controller.go:185] Namespace has been deleted non-native-resources [32mgeneric-resources.sh:319: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:323: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "busybox0" force deleted pod "busybox1" force deleted error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:328: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I1104 19:29:35.514349 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-4m5t7" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I1104 19:29:35.521926 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-r5sq7" [32mgeneric-resources.sh:332: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:337: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:338: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:339: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:344: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[m[32mgeneric-resources.sh:345: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 80 (B[mSuccessful message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled horizontalpodautoscaler.autoscaling/busybox1 autoscaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing horizontalpodautoscaler.autoscaling "busybox0" deleted horizontalpodautoscaler.autoscaling "busybox1" deleted [32mgeneric-resources.sh:353: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:354: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:355: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:359: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[m[32mgeneric-resources.sh:360: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[mSuccessful message:service/busybox0 exposed service/busybox1 exposed error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:366: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:367: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:368: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[mI1104 19:29:37.380447 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-286mm" I1104 19:29:37.390091 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-rr6c4" [32mgeneric-resources.sh:372: Successful get rc busybox0 {{.spec.replicas}}: 2 (B[m[32mgeneric-resources.sh:373: Successful get rc busybox1 {{.spec.replicas}}: 2 (B[mSuccessful message:replicationcontroller/busybox0 scaled replicationcontroller/busybox1 scaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:378: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:382: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mSuccessful message:warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:387: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx1-deployment created deployment.apps/nginx0-deployment created error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I1104 19:29:38.195641 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx1-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-758b5949b6 to 2" I1104 19:29:38.202432 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx0-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-75db9cdfd9 to 2" I1104 19:29:38.202479 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-cf27d" I1104 19:29:38.204968 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx1-deployment-758b5949b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-758b5949b6-ct9ch" I1104 19:29:38.207775 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-4kf6f" I1104 19:29:38.214125 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/nginx0-deployment-75db9cdfd9" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-75db9cdfd9-2z8qq" [32mgeneric-resources.sh:391: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment: (B[m[32mgeneric-resources.sh:392: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[m[32mgeneric-resources.sh:396: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9:k8s.gcr.io/nginx:1.7.9: (B[mSuccessful message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1) deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1) error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing deployment.apps/nginx1-deployment paused deployment.apps/nginx0-deployment paused [32mgeneric-resources.sh:404: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true: (B[mSuccessful message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' ... skipping 10 lines ... 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx0-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:nginx1-deployment Successful message:deployment.apps/nginx1-deployment REVISION CHANGE-CAUSE 1 <none> deployment.apps/nginx0-deployment REVISION CHANGE-CAUSE 1 <none> error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing E1104 19:29:39.235746 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' E1104 19:29:39.579094 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E1104 19:29:39.841813 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E1104 19:29:39.917720 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:426: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I1104 19:29:40.584102 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox0" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-tnq2g" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I1104 19:29:40.591524 58014 event.go:291] "Event occurred" object="namespace-1604518170-72/busybox1" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-vfxs6" [32mgeneric-resources.sh:430: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mSuccessful message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
message:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox0" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox1" pausing is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:Object 'Kind' is missing Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox0" resuming is not supported Successful message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox1" resuming is not supported warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' Recording: run_namespace_tests Running command: run_namespace_tests +++ Running case: test-cmd.run_namespace_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_namespace_tests +++ [1104 19:29:42] Testing kubectl(v1:namespaces) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created (dry run) namespace/my-namespace created (server dry run) Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1459: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mnamespace "my-namespace" deleted namespace/my-namespace condition met I1104 19:29:48.060443 58014 shared_informer.go:240] Waiting for caches to sync for garbage collector I1104 19:29:48.060511 58014 shared_informer.go:247] Caches are synced for garbage collector Successful message:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created E1104 19:29:48.173782 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1468: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted 
namespace "my-namespace" deleted namespace "namespace-1604517975-15012" deleted ... skipping 29 lines ... namespace "namespace-1604518132-23940" deleted namespace "namespace-1604518132-27864" deleted namespace "namespace-1604518134-12379" deleted namespace "namespace-1604518136-2421" deleted namespace "namespace-1604518138-5629" deleted namespace "namespace-1604518170-72" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:warning: deleting cluster-scoped resources Successful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted namespace "my-namespace" deleted namespace "namespace-1604517975-15012" deleted ... skipping 29 lines ... namespace "namespace-1604518132-23940" deleted namespace "namespace-1604518132-27864" deleted namespace "namespace-1604518134-12379" deleted namespace "namespace-1604518136-2421" deleted namespace "namespace-1604518138-5629" deleted namespace "namespace-1604518170-72" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted I1104 19:29:48.538279 58014 shared_informer.go:240] Waiting for caches to sync for resource quota I1104 19:29:48.538324 58014 shared_informer.go:247] Caches are synced for resource quota namespace/quotas created [32mcore.sh:1475: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1476: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created (dry run) E1104 19:29:49.026025 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource resourcequota/test-quota created (server dry run) [32mcore.sh:1480: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created [32mcore.sh:1483: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: found: (B[mI1104 19:29:49.426431 58014 resource_quota_controller.go:307] Resource quota has been deleted quotas/test-quota resourcequota "test-quota" deleted namespace "quotas" deleted E1104 19:29:49.913950 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E1104 19:29:50.188023 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I1104 19:29:51.000884 58014 horizontal.go:359] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1604518170-72 I1104 19:29:51.005478 58014 horizontal.go:359] Horizontal Pod Autoscaler busybox1 
has been deleted in namespace-1604518170-72
core.sh:1495: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: :
namespace/other created
core.sh:1499: Successful get namespaces/other {{.metadata.name}}: other
core.sh:1503: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}:
pod/valid-pod created
core.sh:1507: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:1509: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Successful
message:error: a resource cannot be retrieved by name across all namespaces
has:a resource cannot be retrieved by name across all namespaces
core.sh:1516: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:1520: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}:
namespace "other" deleted
... skipping 119 lines ...
core.sh:920: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}:
secret "test-secret" deleted
namespace "test-secrets" deleted
I1104 19:30:05.245010 54408 client.go:360] parsed scheme: "passthrough"
I1104 19:30:05.245082 54408 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{http://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1104 19:30:05.245096 54408 clientconn.go:948] ClientConn switching balancer to "pick_first"
E1104 19:30:05.801511 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1104 19:30:06.049875 58014 namespace_controller.go:185] Namespace has been deleted other
E1104 19:30:07.223737 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_configmap_tests
Running command: run_configmap_tests
+++ Running case: test-cmd.run_configmap_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 8 lines ...
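The "a resource cannot be retrieved by name across all namespaces" message above is kubectl's guard against combining a resource name with --all-namespaces. A minimal illustration, with the pod and namespace names taken from the surrounding output:

kubectl get pods valid-pod --namespace=other    # name lookup within one namespace works
kubectl get pods valid-pod --all-namespaces     # error: a resource cannot be retrieved by name across all namespaces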
core.sh:33: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-configmaps\" }}found{{end}}{{end}}:: :
namespace/test-configmaps created
core.sh:37: Successful get namespaces/test-configmaps {{.metadata.name}}: test-configmaps
core.sh:41: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-configmap\" }}found{{end}}{{end}}:: :
core.sh:42: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-binary-configmap\" }}found{{end}}{{end}}:: :
configmap/test-configmap created (dry run)
E1104 19:30:11.470508 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
configmap/test-configmap created (server dry run)
core.sh:46: Successful get configmaps {{range.items}}{{ if eq .metadata.name \"test-configmap\" }}found{{end}}{{end}}:: :
configmap/test-configmap created
configmap/test-binary-configmap created
core.sh:51: Successful get configmap/test-configmap --namespace=test-configmaps {{.metadata.name}}: test-configmap
core.sh:52: Successful get configmap/test-binary-configmap --namespace=test-configmaps {{.metadata.name}}: test-binary-configmap
configmap "test-configmap" deleted
configmap "test-binary-configmap" deleted
namespace "test-configmaps" deleted
E1104 19:30:14.277520 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I1104 19:30:15.260365 58014 namespace_controller.go:185] Namespace has been deleted test-secrets
+++ exit code: 0
Recording: run_client_config_tests
Running command: run_client_config_tests
+++ Running case: test-cmd.run_client_config_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_client_config_tests
+++ [1104 19:30:17] Creating namespace namespace-1604518217-19490
namespace/namespace-1604518217-19490 created
Context "test" modified.
+++ [1104 19:30:17] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 43 lines ...
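Each of the client-config failures above corresponds to pointing kubectl at configuration that does not exist. A sketch of the flag shapes involved, using the placeholder names visible in the messages (the exact invocations in the test script are not quoted in this log):

kubectl get pods --kubeconfig=missing          # error: stat missing: no such file or directory
kubectl get pods --context=missing-context     # context was not found for specified context
kubectl get pods --cluster=missing-cluster     # no server found for cluster "missing-cluster"
kubectl get pods --user=missing-user           # auth info "missing-user" does not exist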
Labels: <none> Annotations: <none> Schedule: 59 23 31 2 * Concurrency Policy: Allow Suspend: False Successful Job History Limit: 3 Failed Job History Limit: 1 Starting Deadline Seconds: <unset> Selector: <unset> Parallelism: <unset> Completions: <unset> Pod Template: Labels: <none> ... skipping 38 lines ... Labels: controller-uid=0df50ea3-2fa8-4ced-87c3-bd0615cae96a job-name=test-job Annotations: cronjob.kubernetes.io/instantiate: manual Parallelism: 1 Completions: 1 Start Time: Wed, 04 Nov 2020 19:30:27 +0000 Pods Statuses: 1 Running / 0 Succeeded / 0 Failed Pod Template: Labels: controller-uid=0df50ea3-2fa8-4ced-87c3-bd0615cae96a job-name=test-job Containers: pi: Image: k8s.gcr.io/perl ... skipping 492 lines ... status: loadBalancer: {} Successful message:kubectl-create kubectl-set has:kubectl-set I1104 19:30:37.741420 58014 namespace_controller.go:185] Namespace has been deleted test-jobs error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1020: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend: (B[mservice/redis-master selector updated Successful message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again has:Conflict [32mcore.sh:1033: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[mservice "redis-master" deleted [32mcore.sh:1040: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[m[32mcore.sh:1044: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[mservice/redis-master created ... skipping 43 lines ... I1104 19:30:44.194206 54408 clientconn.go:948] ClientConn switching balancer to "pick_first" Flag --service-overrides has been deprecated, and will be removed in the future. service/testmetadata created pod/testmetadata created [32mcore.sh:1152: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: testmetadata: (B[m[32mcore.sh:1153: Successful get service testmetadata {{.metadata.annotations}}: map[zone-context:home] (B[mE1104 19:30:44.557338 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Successful message:kubectl-run has:kubectl-run service/exposemetadata exposed [32mcore.sh:1162: Successful get service exposemetadata {{.metadata.annotations}}: map[zone-context:work] (B[mSuccessful ... skipping 30 lines ... message:kube-controller-manager kubectl-client-side-apply kubectl-set has:kubectl-set daemonset.apps/bind restarted [32mapps.sh:51: Successful get daemonsets bind {{.metadata.generation}}: 5 (B[mdaemonset.apps "bind" deleted +++ exit code: 0 E1104 19:30:47.263501 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Recording: run_daemonset_history_tests Running command: run_daemonset_history_tests +++ Running case: test-cmd.run_daemonset_history_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_daemonset_history_tests ... skipping 6 lines ... 
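The daemonset history output that follows hinges on --record storing the full kubectl command line as the kubernetes.io/change-cause annotation, with each template generation backed by a ControllerRevision. A compressed sketch of that flow, assuming the fixture paths quoted in the annotations below (a sketch, not the literal apps.sh sequence):

kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record=true       # generation 1
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true   # generation 2
kubectl rollout history daemonset/bind
kubectl rollout undo daemonset/bind --to-revision=1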
[32mapps.sh:73: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1604518247-12447"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}} kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true]: (B[mdaemonset.apps/bind skipped rollback (current template already matches revision 1) [32mapps.sh:76: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:77: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mdaemonset.apps/bind configured E1104 19:30:48.485694 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:80: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:81: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:82: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[m[32mapps.sh:83: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:2 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1604518247-12447"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:latest","name":"kubernetes-pause"},{"image":"k8s.gcr.io/nginx:test-cmd","name":"app"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}} kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 
--insecure-skip-tls-verify=true --match-server-version=true]:map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1604518247-12447"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"k8s.gcr.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}} kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true]: ... skipping 13 lines ... (B[m[32mapps.sh:88: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:89: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps/bind rolled back [32mapps.sh:92: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:93: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:97: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:98: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mdaemonset.apps/bind rolled back E1104 19:30:50.257075 58014 daemon_controller.go:320] namespace-1604518247-12447/bind failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"bind", GenerateName:"", Namespace:"namespace-1604518247-12447", SelfLink:"", UID:"8cb99a81-ca73-46ad-a849-ddd7d264e91a", ResourceVersion:"1856", Generation:4, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63740115047, loc:(*time.Location)(0x6c1df60)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"4", "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{\"kubernetes.io/change-cause\":\"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true 
--match-server-version=true\"},\"labels\":{\"service\":\"bind\"},\"name\":\"bind\",\"namespace\":\"namespace-1604518247-12447\"},\"spec\":{\"selector\":{\"matchLabels\":{\"service\":\"bind\"}},\"template\":{\"metadata\":{\"labels\":{\"service\":\"bind\"}},\"spec\":{\"affinity\":{\"podAntiAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":[{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"service\",\"operator\":\"In\",\"values\":[\"bind\"]}]},\"namespaces\":[],\"topologyKey\":\"kubernetes.io/hostname\"}]}},\"containers\":[{\"image\":\"k8s.gcr.io/pause:latest\",\"name\":\"kubernetes-pause\"},{\"image\":\"k8s.gcr.io/nginx:test-cmd\",\"name\":\"app\"}]}},\"updateStrategy\":{\"rollingUpdate\":{\"maxUnavailable\":\"10%\"},\"type\":\"RollingUpdate\"}}}\n", "kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kube-controller-manager", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0011eaea0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0011eaf00)}, v1.ManagedFieldsEntry{Manager:"kubectl-client-side-apply", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0011eaf40), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0011eaf60)}, v1.ManagedFieldsEntry{Manager:"kubectl", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0011eaf80), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0011eafa0)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc0011eafe0), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"service":"bind"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kubernetes-pause", Image:"k8s.gcr.io/pause:latest", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"app", Image:"k8s.gcr.io/nginx:test-cmd", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount(nil), VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), 
Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc002aee8a8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0004fe3f0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(0xc0011eb020), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc000e54a68)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc002aee8fc)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:3, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "bind": the object has been modified; please apply your changes to the latest version and try again [32mapps.sh:101: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/pause:latest: (B[m[32mapps.sh:102: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:103: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps "bind" deleted +++ exit code: 0 Recording: run_rc_tests ... skipping 32 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 18 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 12 lines ... 
Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 27 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 11 lines ... Namespace: namespace-1604518250-20092 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 15 lines ... 
(B[m[32mcore.sh:1224: Successful get rc frontend {{.spec.replicas}}: 3 (B[mreplicationcontroller/frontend scaled E1104 19:30:52.990475 58014 replica_set.go:201] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1604518250-20092 d566c849-e022-4fec-8db5-16d21252ec2c 1892 2 2020-11-04 19:30:51 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kube-controller-manager Update v1 2020-11-04 19:30:51 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}} {kubectl-create Update v1 2020-11-04 19:30:51 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:replicas":{},"f:selector":{".":{},"f:app":{},"f:tier":{}},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc002721f08 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil>}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} I1104 19:30:53.002051 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-8c7hl" [32mcore.sh:1228: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1232: Successful get rc frontend {{.spec.replicas}}: 2 (B[merror: Expected replicas to be 3, was 2 [32mcore.sh:1236: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1240: Successful get rc frontend {{.spec.replicas}}: 2 (B[mreplicationcontroller/frontend scaled I1104 19:30:53.613055 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/frontend" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-6xtdr" [32mcore.sh:1244: Successful get rc frontend {{.spec.replicas}}: 3 (B[m[32mcore.sh:1248: Successful get rc frontend {{.spec.replicas}}: 3 ... skipping 31 lines ... 
(B[mdeployment.apps "nginx-deployment" deleted Successful message:service/expose-test-deployment exposed has:service/expose-test-deployment exposed service "expose-test-deployment" deleted Successful message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed See 'kubectl expose -h' for help and examples has:invalid deployment: no selectors deployment.apps/nginx-deployment created I1104 19:30:56.000098 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76b5cd66f5 to 3" I1104 19:30:56.006760 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-r7ggk" I1104 19:30:56.011155 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-76b5cd66f5" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76b5cd66f5-rkptr" ... skipping 23 lines ... service "frontend" deleted service "frontend-2" deleted service "frontend-3" deleted service "frontend-4" deleted service "frontend-5" deleted Successful message:error: cannot expose a Node has:cannot expose Successful message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters has:metadata.name: Invalid value Successful message:service/kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed has:kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed service "kubernetes-serve-hostname-testing-sixty-three-characters-in-len" deleted E1104 19:30:58.626362 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Successful message:service/etcd-server exposed has:etcd-server exposed [32mcore.sh:1353: Successful get service etcd-server {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: port-1 2380 (B[m[32mcore.sh:1354: Successful get service etcd-server {{(index .spec.ports 1).name}} {{(index .spec.ports 1).port}}: port-2 2379 (B[mservice "etcd-server" deleted ... skipping 22 lines ... (B[mhorizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1391: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 1 2 70 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted horizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1395: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set replicationcontroller "frontend" deleted [32mcore.sh:1404: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mapiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null ... skipping 24 lines ... 
limits: cpu: 300m requests: cpu: 300m terminationGracePeriodSeconds: 0 status: {} Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found deployment.apps/nginx-deployment-resources created I1104 19:31:01.935237 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-748ddcb48b to 3" I1104 19:31:01.939990 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-zlg72" I1104 19:31:01.945865 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-vghsn" I1104 19:31:01.945962 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-748ddcb48b-h8vkn" [32mcore.sh:1410: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources: (B[m[32mcore.sh:1411: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mcore.sh:1412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment-resources resource requirements updated I1104 19:31:02.351560 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-7bfb7d56b6 to 1" I1104 19:31:02.356065 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-7bfb7d56b6" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-7bfb7d56b6-nhv4x" [32mcore.sh:1415: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m: (B[m[32mcore.sh:1416: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m: (B[merror: unable to find container named redis deployment.apps/nginx-deployment-resources resource requirements updated I1104 19:31:02.774731 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-748ddcb48b to 2" I1104 19:31:02.784810 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-748ddcb48b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-748ddcb48b-zlg72" I1104 19:31:02.791234 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up 
replica set nginx-deployment-resources-75dbcccf44 to 1" I1104 19:31:02.796275 58014 event.go:291] "Event occurred" object="namespace-1604518250-20092/nginx-deployment-resources-75dbcccf44" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-75dbcccf44-ht5fk" [32mcore.sh:1421: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: ... skipping 390 lines ... status: "True" type: Progressing observedGeneration: 4 replicas: 4 unavailableReplicas: 4 updatedReplicas: 1 error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1432: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: (B[m[32mcore.sh:1433: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m: (B[m[32mcore.sh:1434: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m: ... skipping 46 lines ... pod-template-hash=69dd6dcd84 Annotations: deployment.kubernetes.io/desired-replicas: 1 deployment.kubernetes.io/max-replicas: 2 deployment.kubernetes.io/revision: 1 Controlled By: Deployment/test-nginx-apps Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=test-nginx-apps pod-template-hash=69dd6dcd84 Containers: nginx: Image: k8s.gcr.io/nginx:test-cmd ... skipping 102 lines ... [32mapps.sh:305: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m Image: k8s.gcr.io/nginx:test-cmd deployment.apps/nginx rolled back (server dry run) [32mapps.sh:309: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx rolled back [32mapps.sh:313: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[merror: unable to find specified revision 1000000 in history [32mapps.sh:316: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[mdeployment.apps/nginx rolled back [32mapps.sh:320: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[mdeployment.apps/nginx paused error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/nginx' and try again error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first) deployment.apps/nginx resumed deployment.apps/nginx rolled back deployment.kubernetes.io/revision-history: 1,3 error: desired revision (3) is different from the running revision (5) deployment.apps/nginx restarted I1104 19:31:15.287046 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-54785cbcb8 to 2" I1104 19:31:15.293623 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-54785cbcb8" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-54785cbcb8-drptp" 
I1104 19:31:15.299849 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-577fc9fd8f to 1" I1104 19:31:15.313122 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-577fc9fd8f" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-577fc9fd8f-pslng" I1104 19:31:15.763619 58014 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1604518250-20092 ... skipping 145 lines ... (B[m[32mapps.sh:364: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated I1104 19:31:18.201674 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6dd48b9849 to 1" I1104 19:31:18.205955 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment-6dd48b9849" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6dd48b9849-vqdjt" [32mapps.sh:367: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:368: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[merror: unable to find container named "redis" deployment.apps/nginx-deployment image updated [32mapps.sh:373: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:test-cmd: (B[m[32mapps.sh:374: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated E1104 19:31:18.953643 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:377: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:378: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[m[32mapps.sh:381: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx:1.7.9: (B[m[32mapps.sh:382: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/perl: (B[mdeployment.apps/nginx-deployment image updated I1104 19:31:19.560723 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-b8c4df945 to 2" ... skipping 44 lines ... 
I1104 19:31:22.590313 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-59b7fccd97 to 0" deployment.apps/nginx-deployment env updated deployment.apps/nginx-deployment env updated I1104 19:31:22.738499 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57ddd474c4 to 1" I1104 19:31:22.746394 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment-59b7fccd97" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-59b7fccd97-27dl7" deployment.apps "nginx-deployment" deleted E1104 19:31:22.843810 58014 replica_set.go:532] sync "namespace-1604518264-9279/nginx-deployment-b8c4df945" failed with Operation cannot be fulfilled on replicasets.apps "nginx-deployment-b8c4df945": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1604518264-9279/nginx-deployment-b8c4df945, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: d5728b06-2045-4632-8fd0-462800ef7339, UID in object meta: I1104 19:31:22.893298 58014 event.go:291] "Event occurred" object="namespace-1604518264-9279/nginx-deployment-57ddd474c4" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57ddd474c4-s7bfp" configmap "test-set-env-config" deleted secret "test-set-env-secret" deleted +++ exit code: 0 Recording: run_rs_tests Running command: run_rs_tests +++ Running case: test-cmd.run_rs_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_rs_tests +++ [1104 19:31:23] Creating namespace namespace-1604518283-6630 namespace/namespace-1604518283-6630 created E1104 19:31:23.241299 58014 replica_set.go:532] sync "namespace-1604518264-9279/nginx-deployment-59b7fccd97" failed with replicasets.apps "nginx-deployment-59b7fccd97" not found Context "test" modified. 
+++ [1104 19:31:23] Testing kubectl(v1:replicasets) E1104 19:31:23.291204 58014 replica_set.go:532] sync "namespace-1604518264-9279/nginx-deployment-68d657fb6" failed with replicasets.apps "nginx-deployment-68d657fb6" not found E1104 19:31:23.342987 58014 replica_set.go:532] sync "namespace-1604518264-9279/nginx-deployment-5f8c874568" failed with replicasets.apps "nginx-deployment-5f8c874568" not found [32mapps.sh:541: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mE1104 19:31:23.391471 58014 replica_set.go:532] sync "namespace-1604518264-9279/nginx-deployment-57ddd474c4" failed with replicasets.apps "nginx-deployment-57ddd474c4" not found replicaset.apps/frontend created I1104 19:31:23.571988 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-472zl" +++ [1104 19:31:23] Deleting rs I1104 19:31:23.576680 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-djf2n" I1104 19:31:23.594552 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-4t8k8" replicaset.apps "frontend" deleted [32mapps.sh:547: Successful get pods -l "tier=frontend" {{range.items}}{{.metadata.name}}:{{end}}: (B[mE1104 19:31:23.791398 58014 replica_set.go:532] sync "namespace-1604518283-6630/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:551: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created I1104 19:31:24.081528 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-gdk6n" I1104 19:31:24.085947 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-rb997" I1104 19:31:24.086115 58014 event.go:291] "Event occurred" object="namespace-1604518283-6630/frontend" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-phjxm" [32mapps.sh:555: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[m+++ [1104 19:31:24] Deleting rs replicaset.apps "frontend" deleted E1104 19:31:24.293487 58014 replica_set.go:532] sync "namespace-1604518283-6630/frontend" failed with Operation cannot be fulfilled on replicasets.apps "frontend": the object has been modified; please apply your changes to the latest version and try again E1104 19:31:24.391295 58014 replica_set.go:532] sync "namespace-1604518283-6630/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:559: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mE1104 19:31:24.461047 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mapps.sh:561: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[mpod "frontend-gdk6n" 
deleted pod "frontend-phjxm" deleted pod "frontend-rb997" deleted I1104 19:31:24.683042 58014 horizontal.go:359] Horizontal Pod Autoscaler nginx-deployment has been deleted in namespace-1604518264-9279 [32mapps.sh:564: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: ... skipping 15 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 18 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 12 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 10 lines ... Type Reason Age From Message ---- ------ ---- ---- ------- Normal SuccessfulCreate 0s replicaset-controller Created pod: frontend-pdkkj Normal SuccessfulCreate 0s replicaset-controller Created pod: frontend-ns2mg Normal SuccessfulCreate 0s replicaset-controller Created pod: frontend-cvpfp (B[m E1104 19:31:25.674980 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource matched Name: matched Pod Template: matched Labels: matched Selector: matched Replicas: matched Pods Status: ... skipping 3 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 11 lines ... 
Namespace: namespace-1604518283-6630 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 219 lines ... horizontalpodautoscaler.autoscaling/frontend autoscaled [32mapps.sh:706: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{.spec.targetCPUUtilizationPercentage}}: 2 3 80 (B[mSuccessful message:kubectl-autoscale has:kubectl-autoscale horizontalpodautoscaler.autoscaling "frontend" deleted Error: required flag(s) "max" not set replicaset.apps "frontend" deleted +++ exit code: 0 Recording: run_stateful_set_tests Running command: run_stateful_set_tests +++ Running case: test-cmd.run_stateful_set_tests ... skipping 61 lines ... (B[m[32mapps.sh:466: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: (B[m[32mapps.sh:467: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:470: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:471: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mSuccessful message:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:475: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.7: (B[m[32mapps.sh:476: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:479: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: k8s.gcr.io/nginx-slim:0.8: (B[m[32mapps.sh:480: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: k8s.gcr.io/pause:2.0: ... skipping 60 lines ... Name: mock Namespace: namespace-1604518300-15874 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:2.0 Port: 9949/TCP ... skipping 58 lines ... Name: mock Namespace: namespace-1604518300-15874 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:2.0 Port: 9949/TCP ... skipping 58 lines ... Name: mock Namespace: namespace-1604518300-15874 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:2.0 Port: 9949/TCP ... skipping 41 lines ... Namespace: namespace-1604518300-15874 Selector: app=mock Labels: app=mock status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: k8s.gcr.io/pause:2.0 Port: 9949/TCP ... skipping 11 lines ... 
Namespace: namespace-1604518300-15874 Selector: app=mock2 Labels: app=mock2 status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock2 Containers: mock-container: Image: k8s.gcr.io/pause:2.0 Port: 9949/TCP ... skipping 3 lines ... Volumes: <none> Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal SuccessfulCreate 0s replication-controller Created pod: mock2-6kghv I1104 19:31:48.854944 58014 horizontal.go:359] Horizontal Pod Autoscaler frontend has been deleted in namespace-1604518283-6630 E1104 19:31:48.984777 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller "mock" deleted replicationcontroller "mock2" deleted replicationcontroller/mock replaced replicationcontroller/mock2 replaced I1104 19:31:49.052117 58014 event.go:291] "Event occurred" object="namespace-1604518300-15874/mock" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock-m7hqv" I1104 19:31:49.058397 58014 event.go:291] "Event occurred" object="namespace-1604518300-15874/mock2" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock2-chg9s" ... skipping 96 lines ... +++ [1104 19:31:54] Testing persistent volumes [32mstorage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created [32mstorage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[mpersistentvolume "pv0001" deleted persistentvolume/pv0002 created E1104 19:31:55.193065 58014 pv_protection_controller.go:118] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again E1104 19:31:55.204446 58014 pv_protection_controller.go:118] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002: (B[mpersistentvolume "pv0002" deleted persistentvolume/pv0003 created E1104 19:31:55.590441 58014 pv_protection_controller.go:118] PV pv0003 failed with : Operation cannot be fulfilled on persistentvolumes "pv0003": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003: (B[mpersistentvolume "pv0003" deleted [32mstorage.sh:42: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created E1104 19:31:56.099690 58014 pv_protection_controller.go:118] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:45: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[mSuccessful message:warning: deleting cluster-scoped resources, not scoped to the provided namespace persistentvolume "pv0001" deleted has:warning: deleting cluster-scoped resources Successful ... skipping 9 lines ... 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_persistent_volume_claims_tests +++ [1104 19:31:56] Creating namespace namespace-1604518316-691 namespace/namespace-1604518316-691 created Context "test" modified. +++ [1104 19:31:56] Testing persistent volumes claims [32mE1104 19:31:56.754123 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource storage.sh:64: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolumeclaim/myclaim-1 created I1104 19:31:56.961010 58014 event.go:291] "Event occurred" object="namespace-1604518316-691/myclaim-1" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" I1104 19:31:56.967208 58014 event.go:291] "Event occurred" object="namespace-1604518316-691/myclaim-1" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" [32mstorage.sh:67: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-1: (B[mpersistentvolumeclaim "myclaim-1" deleted ... skipping 46 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 31 lines ... 
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 38 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... 
Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 29 lines ... Roles: <none> Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 CreationTimestamp: Wed, 04 Nov 2020 19:26:13 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Wed, 04 Nov 2020 19:26:13 +0000 Wed, 04 Nov 2020 19:27:14 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 48 lines ... Running command: run_authorization_tests +++ Running case: test-cmd.run_authorization_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_authorization_tests +++ [1104 19:32:01] Testing authorization E1104 19:32:01.091224 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource subjectaccessreview.authorization.k8s.io/<unknown> created Warning: authorization.k8s.io/v1beta1 SubjectAccessReview is deprecated in v1.19+, unavailable in v1.22+; use authorization.k8s.io/v1 SubjectAccessReview subjectaccessreview.authorization.k8s.io/<unknown> created +++ [1104 19:32:01] "authorization.k8s.io/subjectaccessreviews" returns as expected: { "kind": "SubjectAccessReview", "apiVersion": "authorization.k8s.io/v1beta1", ... skipping 72 lines ... yes has:the server doesn't have a resource type Successful message:yes has:yes Successful message:error: --subresource can not be used with NonResourceURL has:subresource can not be used with NonResourceURL Successful Successful message:yes 0 has:0 ... skipping 59 lines ... 
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]} [32mlegacy-script.sh:838: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB: (B[m[32mlegacy-script.sh:839: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R: (B[m[32mlegacy-script.sh:840: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB: (B[m[32mlegacy-script.sh:841: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR: (B[mSuccessful message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole has:only rbac.authorization.k8s.io/v1 is supported rolebinding.rbac.authorization.k8s.io "testing-RB" deleted role.rbac.authorization.k8s.io "testing-R" deleted warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "testing-CR" deleted clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted ... skipping 21 lines ... I1104 19:32:05.110802 58014 event.go:291] "Event occurred" object="namespace-1604518324-6771/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-nkndc" I1104 19:32:05.117703 58014 event.go:291] "Event occurred" object="namespace-1604518324-6771/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-w6fwl" service/cassandra created [32mdiscovery.sh:91: Successful get all -l'app=cassandra' {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra: (B[mpod "cassandra-nkndc" deleted I1104 19:32:05.584571 58014 event.go:291] "Event occurred" object="namespace-1604518324-6771/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-vfspk" E1104 19:32:05.590942 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource pod "cassandra-w6fwl" deleted I1104 19:32:05.595405 58014 event.go:291] "Event occurred" object="namespace-1604518324-6771/cassandra" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-jmjcb" replicationcontroller "cassandra" deleted E1104 19:32:05.602331 58014 replica_set.go:532] sync "namespace-1604518324-6771/cassandra" failed with replicationcontrollers "cassandra" not found service "cassandra" deleted +++ exit code: 0 Recording: run_kubectl_explain_tests Running command: run_kubectl_explain_tests +++ Running case: test-cmd.run_kubectl_explain_tests ... skipping 1113 lines ... 
has:kubectl-taint node/127.0.0.1 untainted node/127.0.0.1 untainted [32mnode-management.sh:96: Successful get nodes 127.0.0.1 {{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}: dedicated=<no value>:PreferNoSchedule (B[mnode/127.0.0.1 untainted [32mnode-management.sh:100: Successful get nodes 127.0.0.1 {{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}: (B[mE1104 19:32:33.872642 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mnode-management.sh:104: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 cordoned (dry run) node/127.0.0.1 cordoned (server dry run) [32mnode-management.sh:107: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[m[32mnode-management.sh:111: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 cordoned (dry run) ... skipping 23 lines ... message:node/127.0.0.1 already uncordoned (server dry run) has:already uncordoned [32mnode-management.sh:145: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 labeled [32mnode-management.sh:150: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label (B[mSuccessful message:error: cannot specify both a node name and a --selector option See 'kubectl drain -h' for help and examples has:cannot specify both a node name Successful message:error: USAGE: cordon NODE [flags] See 'kubectl cordon -h' for help and examples has:error\: USAGE\: cordon NODE node/127.0.0.1 already uncordoned Successful message:error: You must provide one or more resources by argument or filename. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' '<resource> <name>' '<resource>' has:must provide one or more resources ... skipping 14 lines ... +++ [1104 19:32:37] Testing kubectl plugins Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/version/kubectl-version - warning: kubectl-version overwrites existing command: "kubectl version" error: one plugin warning was found has:kubectl-version overwrites existing command: "kubectl version" Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo error: one plugin warning was found has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin Successful message:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo has:plugins are available Successful message:Unable read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping... error: unable to find any kubectl plugins in your PATH has:unable to find any kubectl plugins in your PATH Successful message:I am plugin foo has:plugin foo Successful message:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1 ... skipping 10 lines ... 
+++ Running case: test-cmd.run_impersonation_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_impersonation_tests +++ [1104 19:32:37] Testing impersonation Successful message:error: requesting groups or user-extra for test-admin without impersonating a user has:without impersonating a user Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest certificatesigningrequest.certificates.k8s.io/foo created [32mauthorization.sh:68: Successful get csr/foo {{.spec.username}}: user1 (B[m[32mauthorization.sh:69: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated (B[mWarning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest ... skipping 19 lines ... deployment.apps/test-1 created I1104 19:32:39.360870 58014 event.go:291] "Event occurred" object="namespace-1604518359-10559/test-1-7487ff9cbb" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-7487ff9cbb-pgjxt" I1104 19:32:39.455022 58014 event.go:291] "Event occurred" object="namespace-1604518359-10559/test-2" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-646997777c to 1" I1104 19:32:39.461803 58014 event.go:291] "Event occurred" object="namespace-1604518359-10559/test-2-646997777c" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-646997777c-59sj8" deployment.apps/test-2 created [32mwait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2, (B[mE1104 19:32:41.468644 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E1104 19:32:41.502422 58014 reflector.go:138] k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource deployment.apps "test-1" deleted deployment.apps "test-2" deleted Successful message:deployment.apps/test-1 condition met deployment.apps/test-2 condition met has:test-1 condition met ... skipping 92 lines ... I1104 19:32:46.367989 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I1104 19:32:46.368063 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I1104 19:32:46.368076 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I1104 19:32:46.368083 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I1104 19:32:46.368103 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick I1104 19:32:46.368158 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick W1104 19:32:46.368180 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... 
I1104 19:32:46.368193 54408 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
E1104 19:32:46.369186 54408 controller.go:184] rpc error: code = Unavailable desc = transport is closing
... skipping many repeated clientconn.go blockingPicker / addrConn.createTransport reconnect warnings for etcd at 127.0.0.1:2379 ...
W1104 19:32:46.370989 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}.
Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W1104 19:32:46.371056 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W1104 19:32:46.371093 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W1104 19:32:46.371302 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... W1104 19:32:46.371353 54408 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {http://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting... junit report dir: /logs/artifacts +++ [1104 19:32:46] Clean up complete + make test-integration +++ [1104 19:32:51] Checking etcd is on PATH /home/prow/go/src/k8s.io/kubernetes/third_party/etcd/etcd +++ [1104 19:32:51] Starting etcd instance ... skipping 16 lines ... {"Time":"2020-11-04T19:35:42.045536787Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":"kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:326 +0x2e9\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json.(*Serializer).Encode(0xc00039ae60, 0x529afa0, 0xc019f403c0, 0x5289ce0, 0xc005465ae0, 0x3b7eea4, 0x6)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:300 +0x169\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).doEncode(0xc019f40460, 0x529afa0, 0xc019f403c0, 0x5289ce0, 0xc005465ae0, 0x0, 0x0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go:228 +0x396\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).Encode(0xc019f40460, 0x529afa0, 0xc019f403c0, 0x5289ce0, 0xc005465ae0, 0x52f18c0, 0xc00039ae60)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local"} {"Time":"2020-11-04T19:35:42.045545737Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":"/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go:184 +0x170\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.SerializeObject(0x4a61e46, 0x10, 0x7f6a44214fe0, 0xc019f40460, 0x52f2180, 0xc0005a96a8, 0xc019f4e100, 0x1f4, 0x529afa0, 0xc019f403c0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:96 
+0x12c\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.WriteObjectNegotiated(0x52f5580, 0xc01a178780, 0x52f58c0, 0x74f6350, 0x4a44f2c, 0x4, 0x4a437d8, 0x2, 0x52f2180, 0xc0005a96a8, ...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:253 +0x572\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.ErrorNegotiated(0x528e040, 0xc019f15ea0, 0x52f5580, 0xc01a178780, 0x4a44f2c,"} {"Time":"2020-11-04T19:35:42.045570166Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":"s.InstrumentRouteFunc.func1(0xc019d859b0, 0xc009e7b570)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:422 +0x2d5\\nk8s.io/kubernetes/vendor/github.com/emicklei/go-restful.(*Container).dispatch(0xc009d27200, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/github.com/emicklei/go-restful/container.go:288 +0xa84\\nk8s.io/kubernetes/vendor/github.com/emicklei/go-restful.(*Container).Dispatch(...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/github.com/emicklei/go-restful/container.go:199\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server.director.ServeHTTP(0x4a5b4e1, 0xe, 0xc009d27200, 0xc009472bd0, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/handler.go:146 +0x5de\\nk8s.io/kubernetes/vendor/k8s.io"} {"Time":"2020-11-04T19:35:42.045587351Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":".io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:71 +0x186\\nnet/http.HandlerFunc.ServeHTTP(0xc00e4b3e00, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func2(0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go:175 +0x4cf\\nnet/http.HandlerFunc.ServeHTTP(0xc00b6b9170, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackCompleted.func1(0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:95 +0x165\\nnet/http.HandlerFunc.ServeHTTP(0xc00b6b91a0, 0x7f6a348cd578, 0xc0005a9698,"} {"Time":"2020-11-04T19:35:42.045596308Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":" 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f6a348cd578, 0xc0005a9698, 
0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go:50 +0x23e6\\nnet/http.HandlerFunc.ServeHTTP(0xc00e4b3e40, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:71 +0x186\\nnet/http.HandlerFunc.ServeHTTP(0xc00e4b3e80, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackCompleted.func1(0x7f6a34"} {"Time":"2020-11-04T19:35:42.045616108Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":"latency/filterlatency.go:95 +0x165\\nnet/http.HandlerFunc.ServeHTTP(0xc00b6b9230, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e100)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f6a348cd578, 0xc0005a9698, 0xc019f4e000)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go:70 +0x6d2\\nnet/http.HandlerFunc.ServeHTTP(0xc0055da230, 0x7f6a348cd578, 0xc0005a9698, 0xc019f4e000)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f6a348cd578, 0xc0005a9698, 0xc019f33f00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:80 +0x38a\\nnet/http.HandlerFunc.ServeHTTP(0xc00e4b3f00, 0x7f6a348cd578, 0xc0005a9698, 0xc019f33f00)\\n\\t/usr/local/go/src/net/http/server.go:"} {"Time":"2020-11-04T19:35:42.045624678Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestListOptions/watchCacheEnabled=true/limit=0_continue=empty_rv=invalid_rvMatch=NotOlderThan","Output":"2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc0107a2ae0, 0xc0085c6200, 0x52f8bc0, 0xc0005a9698, 0xc019f33f00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:113 +0xb8\\ncreated by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:99 +0x1cc\\n\" addedInfo=\"\\nlogging error output: \\\"{\\\\\\\"kind\\\\\\\":\\\\\\\"Status\\\\\\\",\\\\\\\"apiVersion\\\\\\\":\\\\\\\"v1\\\\\\\",\\\\\\\"metadata\\\\\\\":{},\\\\\\\"status\\\\\\\":\\\\\\\"Failure\\\\\\\",\\\\\\\"message\\\\\\\":\\\\\\\"resourceVersion: Invalid value: \\\\\\\\\\\\\\\"invalid\\\\\\\\\\\\\\\": strconv.ParseUint: parsing \\\\\\\\\\\\\\\"invalid\\\\\\\\\\\\\\\": invalid syntax\\\\\\\",\\\\\\\"code\\\\\\\":500}\\\\n\\\"\\n\"\n"} {"Time":"2020-11-04T19:35:42.264345033Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestSelfSubjectAccessReview","Output":"meout.go:228 
+0xb2\\nnet/http.Error(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f0cc60, 0x60, 0x1f4)\\n\\t/usr/local/go/src/net/http/server.go:2054 +0x1f6\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.InternalError(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00, 0x5302960, 0xc008f27b60)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go:75 +0x11e\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthorization.func1(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go:69 +0x4d4\\nnet/http.HandlerFunc.ServeHTTP(0xc0053f6700, 0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34"} {"Time":"2020-11-04T19:35:42.264357225Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestSelfSubjectAccessReview","Output":"b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:71 +0x186\\nnet/http.HandlerFunc.ServeHTTP(0xc0053f6740, 0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func2(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go:175 +0x4cf\\nnet/http.HandlerFunc.ServeHTTP(0xc0053f16b0, 0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackCompleted.func1(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:95 +0x165\\nnet/ht"} {"Time":"2020-11-04T19:35:42.264380487Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestSelfSubjectAccessReview","Output":"o/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:95 +0x165\\nnet/http.HandlerFunc.ServeHTTP(0xc0053f1770, 0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34b00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34a00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go:70 +0x6d2\\nnet/http.HandlerFunc.ServeHTTP(0xc0053fc0a0, 0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34a00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f5c5c20fd90, 0xc008eb45e8, 0xc008f34900)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:80 +0x38a\\nnet/http.HandlerFunc.ServeHTTP(0xc0053f6840, 0x7f5c5c20fd90, "} 
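The 500 logged for TestListOptions above is a resourceVersion parse failure; the following is a minimal standard-library sketch of the underlying error, with no apiserver code involved.

```go
// Minimal reproduction of the error string in the TestListOptions output
// above: resourceVersion values must be unsigned integers, so the literal
// "invalid" fails strconv.ParseUint, and the log shows the apiserver
// surfacing that as a code-500 Status object.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseUint("invalid", 10, 64)
	fmt.Println(err)
	// Output: strconv.ParseUint: parsing "invalid": invalid syntax
}
```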
{"Time":"2020-11-04T19:35:42.264386912Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestSelfSubjectAccessReview","Output":"0xc008eb45e8, 0xc008f34900)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc008f0cc00, 0xc004a08d60, 0x536e8e0, 0xc008eb45e8, 0xc008f34900)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:113 +0xb8\\ncreated by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:99 +0x1cc\\n\" addedInfo=\"\\nlogging error output: \\\"Internal Server Error: \\\\\\\"/apis/authorization.k8s.io/v1/selfsubjectaccessreviews\\\\\\\": I'm sorry, Dave\\\\n\\\"\\n\"\n"} {"Time":"2020-11-04T19:35:50.597408752Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"netes/vendor/k8s.io/apiserver/pkg/endpoints/metrics.(*ResponseWriterDelegator).WriteHeader(0xc01135d3b0, 0x1f7)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:544 +0x45\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.(*deferredResponseWriter).Write(0xc0113760f0, 0xc004ce0a80, 0xa3, 0xa80, 0x0, 0x0, 0x4a3b180)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:204 +0x1f7\\nencoding/json.(*Encoder).Encode(0xc00c001a98, 0x4a049c0, 0xc011350a00, 0x0, 0x41147b)\\n\\t/usr/local/go/src/encoding/json/stream.go:231 +0x1cb\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json.(*Serializer).doEncode(0xc0000c8f50, 0x530fc40, 0xc011350a00, 0x52fe9a0, 0xc0113760f0, 0x0, 0x0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runti"} {"Time":"2020-11-04T19:35:50.597419161Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"me/serializer/json/json.go:326 +0x2e9\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json.(*Serializer).Encode(0xc0000c8f50, 0x530fc40, 0xc011350a00, 0x52fe9a0, 0xc0113760f0, 0x3bcd12c, 0x6)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:300 +0x169\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).doEncode(0xc011350aa0, 0x530fc40, 0xc011350a00, 0x52fe9a0, 0xc0113760f0, 0x0, 0x0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go:228 +0x396\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).Encode(0xc011350aa0, 0x530fc40, 0xc011350a00, 0x52fe9a0, 0xc0113760f0, 0x53674e0, 0xc0000c8f50)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning"} {"Time":"2020-11-04T19:35:50.597427692Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"/versioning.go:184 
+0x170\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.SerializeObject(0x4ac9b42, 0x10, 0x7f5c5c357f28, 0xc011350aa0, 0x5367da0, 0xc007a6b2e0, 0xc011360d00, 0x1f7, 0x530fc40, 0xc011350a00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:96 +0x12c\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.WriteObjectNegotiated(0x536b220, 0xc008cedd00, 0x536b560, 0x763ceb0, 0x0, 0x0, 0x4aaa566, 0x2, 0x5367da0, 0xc007a6b2e0, ...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:253 +0x572\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.ErrorNegotiated(0x52fe1c0, 0xc011350960, 0x536b220, 0xc008cedd00, 0x0, 0x0, 0x4aaa566, 0x2, 0x5367da0, 0xc007a6b2e0, ...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output"} {"Time":"2020-11-04T19:35:50.597447359Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:272 +0x16f\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.(*RequestScope).err(...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go:89\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.ConnectResource.func1.1()\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go:188 +0x259\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics.RecordLongRunning(0xc011360d00, 0xc01135a790, 0x4ab4c93, 0x9, 0xc00c002890)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:365 +0x293\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.ConnectResource.func1(0x5367da0, 0xc007a6b2e0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/ku"} {"Time":"2020-11-04T19:35:50.597455609Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"bernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go:185 +0x472\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints.restfulConnectResource.func1(0xc01135d320, 0xc011163f10)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/installer.go:1215 +0x99\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics.InstrumentRouteFunc.func1(0xc01135d320, 0xc011163f10)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:422 +0x2d5\\nk8s.io/kubernetes/vendor/github.com/emicklei/go-restful.(*Container).dispatch(0xc008755dd0, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/github.com/emicklei/go-restful/container.go:288 +0xa84\\nk8s.io/kubernetes/vendor/github.com/emicklei/go-restful.(*Container).Dispatch(...)\\n\\t/home/prow/go/src/k8s.io/ku"} 
{"Time":"2020-11-04T19:35:50.59747236Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"c/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go:64 +0x59a\\nnet/http.HandlerFunc.ServeHTTP(0xc008ced500, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:71 +0x186\\nnet/http.HandlerFunc.ServeHTTP(0xc008ced540, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.WithMaxInFlightLimit.func2(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go:175 +0x4cf\\nnet/http.HandlerFunc.ServeHTTP(0xc00a26cbd0, 0x7f5c5c20fd90, 0xc007a6b2d0, 0x"} {"Time":"2020-11-04T19:35:50.597481663Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"c011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackCompleted.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:95 +0x165\\nnet/http.HandlerFunc.ServeHTTP(0xc00a26cc00, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithImpersonation.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go:50 +0x23e6\\nnet/http.HandlerFunc.ServeHTTP(0xc008ced580, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f5c5c20f"} {"Time":"2020-11-04T19:35:50.597501769Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"ency/filterlatency.go:71 +0x186\\nnet/http.HandlerFunc.ServeHTTP(0xc008ced600, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackCompleted.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:95 +0x165\\nnet/http.HandlerFunc.ServeHTTP(0xc00a26cc90, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360d00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters.WithAuthentication.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360c00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go:70 +0x6d2\\nnet/http.HandlerFunc.ServeHTTP(0xc008b65f40, 0x7f5c5c20fd90, 0xc007a6b2d0, 
0xc011360c00)\\n\\t/usr/local/go/src/net/http/server.go:2"} {"Time":"2020-11-04T19:35:50.597509684Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAuthModeAlwaysAllow","Output":"042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency.trackStarted.func1(0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go:80 +0x38a\\nnet/http.HandlerFunc.ServeHTTP(0xc008ced640, 0x7f5c5c20fd90, 0xc007a6b2d0, 0xc011360b00)\\n\\t/usr/local/go/src/net/http/server.go:2042 +0x44\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP.func1(0xc01134eae0, 0xc008f27b80, 0x536e8e0, 0xc007a6b2d0, 0xc011360b00)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:113 +0xb8\\ncreated by k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters.(*timeoutHandler).ServeHTTP\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go:99 +0x1cc\\n\" addedInfo=\"\\nlogging error outp"} {"Time":"2020-11-04T19:35:53.831713905Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apimachinery","Output":"ok \tk8s.io/kubernetes/test/integration/apimachinery\t51.870s\n"} {"Time":"2020-11-04T19:35:54.938058241Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/admissionwebhook","Test":"TestWebhookAdmissionWithWatchCache","Output":"PASS: TestWebhookAdmissionWithWatchCache/storage.k8s.io.v1alpha1.volumeattachments (0.12s)\n"} {"Time":"2020-11-04T19:35:59.376257402Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAliceNotForbiddenOrUnauthorized","Output":"io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics.(*ResponseWriterDelegator).WriteHeader(0xc0185d28d0, 0x1f7)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:544 +0x45\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.(*deferredResponseWriter).Write(0xc018604a50, 0xc004f0e000, 0xa3, 0x8e6, 0x0, 0x0, 0x4a3b180)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:204 +0x1f7\\nencoding/json.(*Encoder).Encode(0xc01452da98, 0x4a049c0, 0xc01860c820, 0x0, 0x41147b)\\n\\t/usr/local/go/src/encoding/json/stream.go:231 +0x1cb\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json.(*Serializer).doEncode(0xc0000c8f50, 0x530fc40, 0xc01860c820, 0x52fe9a0, 0xc018604a50, 0x0, 0x0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/p"} {"Time":"2020-11-04T19:35:59.376267282Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAliceNotForbiddenOrUnauthorized","Output":"kg/runtime/serializer/json/json.go:326 +0x2e9\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json.(*Serializer).Encode(0xc0000c8f50, 0x530fc40, 0xc01860c820, 0x52fe9a0, 0xc018604a50, 0x3bcd12c, 0x6)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:300 
+0x169\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).doEncode(0xc01860c8c0, 0x530fc40, 0xc01860c820, 0x52fe9a0, 0xc018604a50, 0x0, 0x0)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go:228 +0x396\\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning.(*codec).Encode(0xc01860c8c0, 0x530fc40, 0xc01860c820, 0x52fe9a0, 0xc018604a50, 0x53674e0, 0xc0000c8f50)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/serializer/ve"} {"Time":"2020-11-04T19:35:59.376274295Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAliceNotForbiddenOrUnauthorized","Output":"rsioning/versioning.go:184 +0x170\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.SerializeObject(0x4ac9b42, 0x10, 0x7f5c5c357f28, 0xc01860c8c0, 0x5367da0, 0xc01031b348, 0xc0185b3e00, 0x1f7, 0x530fc40, 0xc01860c820)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:96 +0x12c\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.WriteObjectNegotiated(0x536b220, 0xc00fc3cec0, 0x536b560, 0x763ceb0, 0x0, 0x0, 0x4aaa566, 0x2, 0x5367da0, 0xc01031b348, ...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:253 +0x572\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters.ErrorNegotiated(0x52fe1c0, 0xc01860c780, 0x536b220, 0xc00fc3cec0, 0x0, 0x0, 0x4aaa566, 0x2, 0x5367da0, 0xc01031b348, ...)\\n\\t/home/prow/go/src/k8s.io/kubernetes"} {"Time":"2020-11-04T19:35:59.376290804Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/auth","Test":"TestAliceNotForbiddenOrUnauthorized","Output":"/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go:272 +0x16f\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.(*RequestScope).err(...)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go:89\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.ConnectResource.func1.1()\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go:188 +0x259\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics.RecordLongRunning(0xc0185b3e00, 0xc0185a8b00, 0x4ab4c93, 0x9, 0xc01452e890)\\n\\t/home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go:365 +0x293\\nk8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/endpoints/handlers.ConnectResource.func1(0x5367da0, 0xc01031b348, 0xc0185b3e00)\\n\\t/home/prow/go/src/k"} ... skipping 128 lines ... 
{"Time":"2020-11-04T19:41:11.034412009Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/garbagecollector","Test":"TestCustomResourceCascadingDeletion","Output":"dangling: []v1.OwnerReference{v1.OwnerReference{APIVersion:\"mygroup.example.com/v1beta1\", Kind:\"foog7npqa\", Name:\"owner6jss7\", UID:\"c3eafc76-da6e-4843-a8ab-49278757cdbb\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}}\n"} {"Time":"2020-11-04T19:41:15.197306774Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/replicaset","Output":"ok \tk8s.io/kubernetes/test/integration/replicaset\t66.322s\n"} {"Time":"2020-11-04T19:41:23.71099113Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/garbagecollector","Test":"TestMixedRelationships","Output":"waitingForDependentsDeletion: []v1.OwnerReference{v1.OwnerReference{APIVersion:\"mygroup.example.com/v1beta1\", Kind:\"foocjv9xa\", Name:\"ownernjqqt\", UID:\"678898ce-6d0d-44ae-83da-fbbc6987ca9d\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}}\n"} {"Time":"2020-11-04T19:41:26.384320402Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/replicationcontroller","Output":"ok \tk8s.io/kubernetes/test/integration/replicationcontroller\t61.955s\n"} {"Time":"2020-11-04T19:41:43.605832743Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/garbagecollector","Test":"TestCRDDeletionCascading","Output":"dangling: []v1.OwnerReference{v1.OwnerReference{APIVersion:\"mygroup.example.com/v1beta1\", Kind:\"fooqr6bca\", Name:\"ownermpnr5\", UID:\"80c0c700-849d-44bd-87ef-5de9f5d35d2d\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}}\n"} {"Time":"2020-11-04T19:41:45.549404051Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/serviceaccount","Output":"ok \tk8s.io/kubernetes/test/integration/serviceaccount\t42.146s\n"} {"Time":"2020-11-04T19:41:56.359327015Z","Action":"output","Packa{"component":"entrypoint","file":"prow/entrypoint/run.go:169","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Entrypoint received interrupt: terminated","severity":"error","time":"2020-11-04T19:42:19Z"}