PR       | bobbypage: WIP: verify specs for mirror pods
Result   | ABORTED
Tests    | 0 failed / 140 succeeded
Started  |
Elapsed  | 44m9s
Revision | 2064e5239f64d9a21490cc8b4067c5b3208bf4b0
Refs     | 116726
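The table above identifies the exact code under test. Below is a minimal sketch of checking that revision out locally, assuming a standard clone of kubernetes/kubernetes and that the PR ref is still available on GitHub; if the recorded SHA was a temporary merge commit created by CI, the direct checkout may fail and the PR head fetched here is the closest substitute.

# Sketch only: fetch PR 116726 and try to check out the revision recorded above.
git clone https://github.com/kubernetes/kubernetes.git
cd kubernetes
git fetch origin pull/116726/head
git checkout 2064e5239f64d9a21490cc8b4067c5b3208bf4b0 || git checkout FETCH_HEAD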
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta3
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
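The cases above come from the kubeadm CLI test package k8s.io/kubernetes/cmd/kubeadm/test/cmd. A minimal sketch of invoking one of them by name with go test from the root of a kubernetes/kubernetes checkout follows; the CI job runs them through hack/make-rules/test-cmd.sh after building cmd/kubeadm (see the log below), so this direct invocation is illustrative only and may need that binary built first.

# Run a single listed case; -run takes a regular expression matching the test name.
go test ./cmd/kubeadm/test/cmd/ -run 'TestCmdVersion' -v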
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_assert_singular_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_deletion_recreation_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_deprecated_api_tests
test-cmd run_exec_credentials_interactive_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_create_validate_tests
test-cmd run_kubectl_debug_baseline_node_tests
test-cmd run_kubectl_debug_baseline_tests
test-cmd run_kubectl_debug_general_node_tests
test-cmd run_kubectl_debug_general_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_events_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_help_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_results_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_recursive_resources_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
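The suite names above correspond to shell functions in test/cmd/legacy-script.sh, which hack/make-rules/test-cmd.sh sources (both paths appear in the log below). A minimal sketch of running the whole suite from the root of a kubernetes/kubernetes checkout, assuming a standard build environment; it builds the needed binaries and starts a local kube-apiserver and kube-controller-manager, so it is not a quick run.

# Run the kubectl command-line test suite (the run_*_tests functions listed above).
make test-cmd
# Equivalent direct invocation of the underlying script:
hack/make-rules/test-cmd.sh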
... skipping 49 lines ... Recording: record_command_canary Running command: record_command_canary +++ Running case: test-cmd.record_command_canary +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: record_command_canary /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 165: bogus-expected-to-fail: command not found !!! [0317 23:08:34] Call tree: !!! [0317 23:08:34] 1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...) !!! [0317 23:08:34] 2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...) !!! [0317 23:08:34] 3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:141 juLog(...) !!! [0317 23:08:34] 4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:169 record_command(...) !!! [0317 23:08:34] 5: hack/make-rules/test-cmd.sh:35 source(...) +++ exit code: 1 +++ error: 1 +++ [0317 23:08:34] Running kubeadm tests go version go1.20.2 linux/amd64 +++ [0317 23:08:38] Building go targets for linux/amd64 k8s.io/kubernetes/cmd/kubeadm (static) go version go1.20.2 linux/amd64 +++ [0317 23:09:33] Running tests without code coverage ... skipping 225 lines ... I0317 23:12:00.494757 20078 naming_controller.go:291] Starting NamingConditionController I0317 23:12:00.494777 20078 establishing_controller.go:76] Starting EstablishingController I0317 23:12:00.494794 20078 nonstructuralschema_controller.go:192] Starting NonStructuralSchemaConditionController I0317 23:12:00.494832 20078 apiapproval_controller.go:186] Starting KubernetesAPIApprovalPolicyConformantConditionController I0317 23:12:00.494913 20078 crd_finalizer.go:266] Starting CRDFinalizer I0317 23:12:00.494955 20078 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::hack/testdata/ca/ca.crt" E0317 23:12:00.566857 20078 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" I0317 23:12:00.592967 20078 cache.go:39] Caches are synced for autoregister controller I0317 23:12:00.593012 20078 cache.go:39] Caches are synced for APIServiceRegistrationController controller I0317 23:12:00.593165 20078 cache.go:39] Caches are synced for AvailableConditionController controller I0317 23:12:00.593216 20078 apf_controller.go:366] Running API Priority and Fairness config worker I0317 23:12:00.593247 20078 apf_controller.go:369] Running API Priority and Fairness periodic rebalancing process I0317 23:12:00.593496 20078 shared_informer.go:318] Caches are synced for configmaps ... skipping 17 lines ... go version go1.20.2 linux/amd64 +++ [0317 23:12:03] Building go targets for linux/amd64 k8s.io/kubernetes/cmd/kube-controller-manager (static) +++ [0317 23:12:41] Generate kubeconfig for controller-manager +++ [0317 23:12:41] Starting controller-manager I0317 23:12:42.376869 23174 serving.go:348] Generated self-signed cert in-memory W0317 23:12:43.177288 23174 authentication.go:426] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory W0317 23:12:43.177326 23174 authentication.go:320] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
W0317 23:12:43.177341 23174 authentication.go:344] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. W0317 23:12:43.177377 23174 authorization.go:225] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory W0317 23:12:43.177389 23174 authorization.go:193] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. I0317 23:12:43.177844 23174 controllermanager.go:187] "Starting" version="v1.27.0-beta.0.22+2bf6326589b014" I0317 23:12:43.177871 23174 controllermanager.go:189] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0317 23:12:43.179746 23174 secure_serving.go:210] Serving securely on [::]:10257 I0317 23:12:43.179916 23174 tlsconfig.go:240] "Starting DynamicServingCertificateController" I0317 23:12:43.180080 23174 leaderelection.go:245] attempting to acquire leader lease kube-system/kube-controller-manager... ... skipping 74 lines ... I0317 23:12:43.226073 23174 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key" I0317 23:12:43.227474 23174 controllermanager.go:638] "Started controller" controller="csrsigning" I0317 23:12:43.227504 23174 controllermanager.go:603] "Warning: controller is disabled" controller="tokencleaner" I0317 23:12:43.227600 23174 certificate_controller.go:112] Starting certificate controller "csrsigning-legacy-unknown" I0317 23:12:43.227628 23174 shared_informer.go:311] Waiting for caches to sync for certificate-csrsigning-legacy-unknown I0317 23:12:43.227665 23174 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key" E0317 23:12:43.227696 23174 core.go:213] "Failed to start cloud node lifecycle controller" err="no cloud provider provided" I0317 23:12:43.227715 23174 controllermanager.go:616] "Warning: skipping controller" controller="cloud-node-lifecycle" I0317 23:12:43.227809 23174 controllermanager.go:603] "Warning: controller is disabled" controller="bootstrapsigner" I0317 23:12:43.227833 23174 controllermanager.go:616] "Warning: skipping controller" controller="nodeipam" W0317 23:12:43.228203 23174 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage. I0317 23:12:43.228254 23174 controllermanager.go:638] "Started controller" controller="endpointslice" I0317 23:12:43.228456 23174 endpointslice_controller.go:252] Starting endpoint slice controller ... skipping 77 lines ... 
I0317 23:12:43.243841 23174 controllermanager.go:638] "Started controller" controller="serviceaccount" I0317 23:12:43.243866 23174 serviceaccounts_controller.go:111] "Starting service account controller" I0317 23:12:43.244073 23174 shared_informer.go:311] Waiting for caches to sync for service account I0317 23:12:43.244208 23174 controllermanager.go:638] "Started controller" controller="csrapproving" I0317 23:12:43.244334 23174 certificate_controller.go:112] Starting certificate controller "csrapproving" I0317 23:12:43.244389 23174 shared_informer.go:311] Waiting for caches to sync for certificate-csrapproving E0317 23:12:43.244560 23174 core.go:92] "Failed to start service controller" err="WARNING: no cloud provider provided, services of type LoadBalancer will fail" I0317 23:12:43.244588 23174 controllermanager.go:616] "Warning: skipping controller" controller="service" I0317 23:12:43.244605 23174 core.go:224] "Will not configure cloud provider routes for allocate-node-cidrs" CIDRs=false routes=true I0317 23:12:43.244616 23174 controllermanager.go:616] "Warning: skipping controller" controller="route" I0317 23:12:43.244813 23174 controllermanager.go:638] "Started controller" controller="pv-protection" I0317 23:12:43.244843 23174 pv_protection_controller.go:78] "Starting PV protection controller" I0317 23:12:43.244859 23174 shared_informer.go:311] Waiting for caches to sync for PV protection ... skipping 43 lines ... I0317 23:12:43.536288 23174 shared_informer.go:318] Caches are synced for HPA I0317 23:12:43.536429 23174 shared_informer.go:318] Caches are synced for stateful set I0317 23:12:43.539735 23174 shared_informer.go:318] Caches are synced for daemon sets I0317 23:12:43.620932 23174 shared_informer.go:318] Caches are synced for resource quota I0317 23:12:43.647753 23174 shared_informer.go:318] Caches are synced for resource quota node/127.0.0.1 created I0317 23:12:43.857476 23174 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"127.0.0.1\" does not exist" +++ [0317 23:12:43] Checking kubectl version WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version. Client Version: version.Info{Major:"1", Minor:"27+", GitVersion:"v1.27.0-beta.0.22+2bf6326589b014", GitCommit:"2bf6326589b0142b461d4679b7907ffa71f4ed20", GitTreeState:"clean", BuildDate:"2023-03-17T22:13:18Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"} Kustomize Version: v5.0.1 Server Version: version.Info{Major:"1", Minor:"27+", GitVersion:"v1.27.0-beta.0.22+2bf6326589b014", GitCommit:"2bf6326589b0142b461d4679b7907ffa71f4ed20", GitTreeState:"clean", BuildDate:"2023-03-17T22:13:18Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"} I0317 23:12:43.966025 23174 shared_informer.go:318] Caches are synced for garbage collector I0317 23:12:44.030261 23174 shared_informer.go:318] Caches are synced for garbage collector I0317 23:12:44.030320 23174 garbagecollector.go:166] "All resource monitors have synced. 
Proceeding to collect garbage" The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocate IP 10.0.0.1: provided IP is already allocated NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 41s Recording: run_kubectl_version_tests Running command: run_kubectl_version_tests +++ Running case: test-cmd.run_kubectl_version_tests ... skipping 196 lines ... +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_RESTMapper_evaluation_tests +++ [0317 23:12:49] Creating namespace namespace-1679094769-13669 namespace/namespace-1679094769-13669 created Context "test" modified. +++ [0317 23:12:49] Testing RESTMapper +++ [0317 23:12:49] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype" +++ exit code: 0 NAME SHORTNAMES APIVERSION NAMESPACED KIND bindings v1 true Binding componentstatuses cs v1 false ComponentStatus configmaps cm v1 true ConfigMap endpoints ep v1 true Endpoints ... skipping 60 lines ... namespace/namespace-1679094771-6821 created Context "test" modified. +++ [0317 23:12:51] Testing clusterroles [32mrbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin (B[m[32mrbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run) clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found clusterrole.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[m[32mSuccessful (B[mmessage:Warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "pod-admin" deleted ... skipping 18 lines ... 
(B[mclusterrole.rbac.authorization.k8s.io/url-reader created [32mrbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get: (B[m[32mrbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*: (B[mclusterrole.rbac.authorization.k8s.io/aggregation-reader created [32mrbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found clusterrolebinding.rbac.authorization.k8s.io/super-admin created [32mrbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: (B[mclusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run) clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run) [32mrbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin: ... skipping 64 lines ... [32mrbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user: (B[m[32mrbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user: (B[m[32mrbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user: (B[mrolebinding.rbac.authorization.k8s.io/admin created (dry run) rolebinding.rbac.authorization.k8s.io/admin created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found has: not found rolebinding.rbac.authorization.k8s.io/admin created [32mrbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole (B[m[32mrbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin: (B[mrolebinding.rbac.authorization.k8s.io/admin subjects updated [32mrbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo: ... skipping 152 lines ... namespace/namespace-1679094779-6811 created Context "test" modified. +++ [0317 23:12:59] Testing role role.rbac.authorization.k8s.io/pod-admin created (dry run) role.rbac.authorization.k8s.io/pod-admin created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found has: not found role.rbac.authorization.k8s.io/pod-admin created [32mrbac.sh:159: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *: (B[m[32mrbac.sh:160: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods: (B[m[32mrbac.sh:161: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: : (B[m[32mSuccessful ... skipping 623 lines ... 
has:valid-pod [32mSuccessful (B[mmessage:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 0s has:valid-pod [32mcore.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: resource(s) were provided, but no name was specified [32mcore.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[merror: setting 'all' parameter but found a non empty selector. [32mcore.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:210: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:214: Successful get pods -lname=valid-pod {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:219: Successful get namespaces {{range.items}}{{ if eq .metadata.name "test-kubectl-describe-pod" }}found{{end}}{{end}}:: : ... skipping 30 lines ... I0317 23:13:17.942378 28317 round_trippers.go:553] GET https://127.0.0.1:6443/apis/policy/v1/namespaces/test-kubectl-describe-pod/poddisruptionbudgets/test-pdb-2 200 OK in 1 milliseconds I0317 23:13:17.944316 28317 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-kubectl-describe-pod/events?fieldSelector=involvedObject.name%3Dtest-pdb-2%2CinvolvedObject.namespace%3Dtest-kubectl-describe-pod%2CinvolvedObject.kind%3DPodDisruptionBudget%2CinvolvedObject.uid%3D6e0c806e-b0ec-49ec-938b-2ffa1dcceafe&limit=500 200 OK in 1 milliseconds (B[mpoddisruptionbudget.policy/test-pdb-3 created [32mcore.sh:271: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2 (B[mpoddisruptionbudget.policy/test-pdb-4 created [32mcore.sh:275: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50% (B[merror: min-available and max-unavailable cannot be both specified [32mcore.sh:281: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/env-test-pod created matched TEST_CMD_1 matched <set to the key 'key-1' in secret 'test-secret'> matched TEST_CMD_2 matched <set to the key 'key-2' of config map 'test-configmap'> ... skipping 242 lines ... 
[32mcore.sh:542: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:3.9: (B[m[32mSuccessful (B[mmessage:kubectl-create kubectl-patch has:kubectl-patch pod/valid-pod patched [32mcore.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx: (B[m+++ [0317 23:13:34] "kubectl patch with resourceVersion 623" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again pod "valid-pod" deleted pod/valid-pod replaced [32mcore.sh:586: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname (B[m[32mSuccessful (B[mmessage:kubectl-replace has:kubectl-replace [32mSuccessful (B[mmessage:error: --grace-period must have --force specified has:\-\-grace-period must have \-\-force specified [32mSuccessful (B[mmessage:error: --timeout must have --force specified has:\-\-timeout must have \-\-force specified node/node-v1-test created I0317 23:13:35.762377 23174 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"node-v1-test\" does not exist" [32mcore.sh:614: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced (server dry run) node/node-v1-test replaced (dry run) [32mcore.sh:639: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: : (B[mnode/node-v1-test replaced [32mcore.sh:655: Successful get node node-v1-test {{.metadata.annotations.a}}: b ... skipping 29 lines ... spec: containers: - image: registry.k8s.io/pause:3.9 name: kubernetes-pause has:localonlyvalue [32mcore.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[merror: 'name' already has a value (valid-pod), and --overwrite is false [32mcore.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[m[32mcore.sh:699: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod (B[mpod/valid-pod labeled [32mcore.sh:703: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan (B[m[32mcore.sh:707: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ... skipping 84 lines ... +++ Running case: test-cmd.run_kubectl_create_error_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_create_error_tests +++ [0317 23:13:46] Creating namespace namespace-1679094826-20310 namespace/namespace-1679094826-20310 created Context "test" modified. +++ [0317 23:13:46] Testing kubectl create with error Error: must specify one of -f and -k Create a resource from a file or from stdin. JSON and YAML formats are accepted. Examples: ... skipping 63 lines ... If true, keep the managedFields when printing objects in JSON or YAML format. --template='': Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. --validate='strict': Must be one of: strict (or true), warn, ignore (or false). 
"true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not. "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise. "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields. --windows-line-endings=false: Only relevant if --edit=true. Defaults to the line ending native to your platform. Usage: kubectl create -f FILENAME [options] ... skipping 38 lines ... I0317 23:13:49.802683 23174 event.go:307] "Event occurred" object="namespace-1679094826-11123/test-deployment-retainkeys-d65c44c97" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-d65c44c97-qhrs4" deployment.apps "test-deployment-retainkeys" deleted [32mapply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mapply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted [32mapply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/test-pod created (dry run) pod/test-pod created (server dry run) [32mapply.sh:107: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: ... skipping 31 lines ... (B[mpod/b created [32mapply.sh:207: Successful get pods a {{.metadata.name}}: a (B[m[32mapply.sh:208: Successful get pods b -n nsb {{.metadata.name}}: b (B[mpod "a" deleted pod "b" deleted [32mSuccessful (B[mmessage:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector has:all resources selected for prune without explicitly passing --all pod/a created pod/b created I0317 23:13:59.811657 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679094826-11123/prune-svc" clusterIPs=map[IPv4:10.0.0.20] service/prune-svc created W0317 23:13:59.812423 32383 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag. ... skipping 44 lines ... (B[mpod/b unchanged W0317 23:14:18.094506 32753 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag. pod/a pruned [32mapply.sh:265: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b: (B[mnamespace "nsb" deleted [32mSuccessful (B[mmessage:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation. has:the namespace from the provided object "nsb" does not match the namespace "foo". 
[32mapply.sh:276: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: (B[mservice/a created [32mapply.sh:280: Successful get services a {{.metadata.name}}: a (B[m[32mSuccessful (B[mmessage:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set ... skipping 28 lines ... (B[m[32mapply.sh:302: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment (B[m[32mapply.sh:303: Successful get service test-the-service {{.metadata.name}}: test-the-service (B[mconfigmap "test-the-map" deleted service "test-the-service" deleted deployment.apps "test-the-deployment" deleted [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found [32mapply.sh:311: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:namespace/multi-resource-ns created Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found has:namespaces "multi-resource-ns" not found [32mSuccessful (B[mmessage:Error from server (NotFound): pods "test-pod" not found has:pods "test-pod" not found pod/test-pod created namespace/multi-resource-ns unchanged [32mapply.sh:319: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod (B[mpod "test-pod" deleted namespace "multi-resource-ns" deleted I0317 23:14:29.804582 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="nsb" [32mapply.sh:325: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:configmap/foo created error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1" ensure CRDs are installed first has:no matches for kind "Bogus" in version "example.com/v1" [32mapply.sh:331: Successful get configmaps foo {{.metadata.name}}: foo (B[mconfigmap "foo" deleted [32mapply.sh:337: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful ... skipping 6 lines ... (B[mpod "pod-a" deleted pod "pod-c" deleted [32mapply.sh:345: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapply.sh:349: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:customresourcedefinition.apiextensions.k8s.io/widgets.example.com created error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1" ensure CRDs are installed first has:no matches for kind "Widget" in version "example.com/v1" I0317 23:14:35.627977 20078 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager customresourcedefinition.apiextensions.k8s.io/widgets.example.com condition met [32mSuccessful (B[mmessage:Error from server (NotFound): widgets.example.com "foo" not found has:widgets.example.com "foo" not found [32mapply.sh:356: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com (B[mI0317 23:14:38.092108 20078 controller.go:624] quota admission added evaluator for: widgets.example.com widget.example.com/foo created customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged [32mapply.sh:359: Successful get widget foo {{.metadata.name}}: foo ... skipping 34 lines ... 
(B[mmessage:902 has:902 pod "test-pod" deleted [32mapply.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m+++ [0317 23:14:40] Testing upgrade kubectl client-side apply to server-side apply pod/test-pod created error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name Please review the fields above--they currently have other managers. Here are the ways you can resolve this warning: * If you intend to manage all of these fields, please re-run the apply command with the `--force-conflicts` flag. * If you do not intend to manage all of the fields, please edit your manifest to remove references to the fields that should keep their ... skipping 153 lines ... (B[mpod "nginx-extensions" deleted [32mSuccessful (B[mmessage:pod/test1 created has:pod/test1 created pod "test1" deleted [32mSuccessful (B[mmessage:error: Invalid image name "InvalidImageName": invalid reference format has:error: Invalid image name "InvalidImageName": invalid reference format +++ exit code: 0 Recording: run_kubectl_create_filter_tests Running command: run_kubectl_create_filter_tests +++ Running case: test-cmd.run_kubectl_create_filter_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 3 lines ... Context "test" modified. +++ [0317 23:14:47] Testing kubectl create filter [32mcreate.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/selector-test-pod created [32mcreate.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found has:pods "selector-test-pod-dont-apply" not found pod "selector-test-pod" deleted +++ exit code: 0 Recording: run_kubectl_apply_deployments_tests Running command: run_kubectl_apply_deployments_tests ... skipping 29 lines ... 
I0317 23:14:50.957905 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5645b79496 to 3" I0317 23:14:50.980392 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-24978" I0317 23:14:51.022785 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-qwp8k" I0317 23:14:51.022825 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-2qwbw" [32mapps.sh:183: Successful get deployment nginx {{.metadata.name}}: nginx (B[m[32mSuccessful (B[mmessage:Error from server (Conflict): error when applying patch: {"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1679094888-21139\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"registry.k8s.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}} to: Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment" Name: "nginx", Namespace: "namespace-1679094888-21139" for: "hack/testdata/deployment-label-change2.yaml": error when patching "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again has:Error from server (Conflict) deployment.apps/nginx configured I0317 23:14:59.572317 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5675dfc785 to 3" I0317 23:14:59.592841 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-jfjmr" I0317 23:14:59.630006 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-c5dfz" I0317 23:14:59.630856 23174 event.go:307] "Event occurred" object="namespace-1679094888-21139/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-x6msj" [32mSuccessful ... skipping 538 lines ... +++ [0317 23:15:25] Creating namespace namespace-1679094925-11584 namespace/namespace-1679094925-11584 created Context "test" modified. 
+++ [0317 23:15:25] Testing kubectl get [32mget.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mget.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:{ "apiVersion": "v1", "items": [], ... skipping 21 lines ... has not:No resources found [32mSuccessful (B[mmessage:NAME has not:No resources found [32mget.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:error: the server doesn't have a resource type "foobar" has not:No resources found [32mSuccessful (B[mmessage:No resources found in namespace-1679094925-11584 namespace. has:No resources found [32mSuccessful (B[mmessage: has not:No resources found [32mSuccessful (B[mmessage:No resources found in namespace-1679094925-11584 namespace. has:No resources found [32mget.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mSuccessful (B[mmessage:Error from server (NotFound): pods "abc" not found has not:List [32mSuccessful (B[mmessage:I0317 23:15:26.597910 35997 loader.go:373] Config loaded from file: /tmp/tmp.vT1Q3sqC26/.kube/config I0317 23:15:26.602995 35997 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 4 milliseconds I0317 23:15:26.619842 35997 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 1 milliseconds I0317 23:15:26.621637 35997 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds ... skipping 597 lines ... } [32mget.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m<no value>[32mSuccessful (B[mmessage:valid-pod: has:valid-pod: [32mSuccessful (B[mmessage:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. 
Printing more information for debugging the template: template was: {.missing} object given to jsonpath engine was: map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2023-03-17T23:15:34Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2023-03-17T23:15:34Z"}}, "name":"valid-pod", "namespace":"namespace-1679094933-20136", "resourceVersion":"1138", "uid":"bd8581b1-907f-47c7-939a-a10c1a9d889f"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"registry.k8s.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}} has:missing is not found error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing" [32mSuccessful (B[mmessage:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". 
Printing more information for debugging the template: template was: {{.missing}} raw data was: {"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2023-03-17T23:15:34Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2023-03-17T23:15:34Z"}],"name":"valid-pod","namespace":"namespace-1679094933-20136","resourceVersion":"1138","uid":"bd8581b1-907f-47c7-939a-a10c1a9d889f"},"spec":{"containers":[{"image":"registry.k8s.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}} object given to template engine was: map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2023-03-17T23:15:34Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2023-03-17T23:15:34Z]] name:valid-pod namespace:namespace-1679094933-20136 resourceVersion:1138 uid:bd8581b1-907f-47c7-939a-a10c1a9d889f] spec:map[containers:[map[image:registry.k8s.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]] has:map has no entry for key "missing" [32mSuccessful (B[mmessage:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 0s has:valid-pod [32mSuccessful (B[mmessage:Error from server (NotFound): the server could not find the requested resource has:the server could not find the requested resource [32mSuccessful (B[mmessage:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 1s has:STATUS [32mSuccessful ... skipping 78 lines ... 
terminationGracePeriodSeconds: 30 status: phase: Pending qosClass: Guaranteed has:name: valid-pod [32mSuccessful (B[mmessage:Error from server (NotFound): pods "invalid-pod" not found has:"invalid-pod" not found pod "valid-pod" deleted [32mget.sh:204: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/redis-master created pod/valid-pod created [32mSuccessful ... skipping 1132 lines ... +++ [0317 23:15:49] Creating namespace namespace-1679094949-29566 namespace/namespace-1679094949-29566 created Context "test" modified. +++ [0317 23:15:49] Testing kubectl exec POD COMMAND [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): pods "abc" not found has:pods "abc" not found [32mSuccessful (B[mmessage:error: cannot exec into multiple objects at a time has:cannot exec into multiple objects at a time pod/test-pod created [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pods "test-pod" not found [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod or type/name must be specified pod "test-pod" deleted +++ exit code: 0 Recording: run_kubectl_exec_resource_name_tests Running command: run_kubectl_exec_resource_name_tests ... skipping 3 lines ... +++ [0317 23:15:50] Creating namespace namespace-1679094950-8884 namespace/namespace-1679094950-8884 created Context "test" modified. +++ [0317 23:15:50] Testing kubectl exec TYPE/NAME COMMAND [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: the server doesn't have a resource type "foo" has:error: [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): deployments.apps "bar" not found has:"bar" not found pod/test-pod created replicaset.apps/frontend created I0317 23:15:50.822742 23174 event.go:307] "Event occurred" object="namespace-1679094950-8884/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-d7g27" I0317 23:15:50.863825 23174 event.go:307] "Event occurred" object="namespace-1679094950-8884/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-h88zm" I0317 23:15:50.863907 23174 event.go:307] "Event occurred" object="namespace-1679094950-8884/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-w4r9d" configmap/test-set-env-config created [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented has:not implemented [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. 
Error from server (BadRequest): pod test-pod does not have a host assigned has not:not found [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod test-pod does not have a host assigned has not:pod, type/name or --filename must be specified [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod frontend-d7g27 does not have a host assigned has not:not found [32mSuccessful (B[mmessage:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Error from server (BadRequest): pod frontend-d7g27 does not have a host assigned has not:pod, type/name or --filename must be specified pod "test-pod" deleted replicaset.apps "frontend" deleted configmap "test-set-env-config" deleted +++ exit code: 0 Recording: run_create_secret_tests Running command: run_create_secret_tests +++ Running case: test-cmd.run_create_secret_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_create_secret_tests [32mSuccessful (B[mmessage:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found [32mSuccessful (B[mmessage:user-specified has:user-specified [32mSuccessful (B[mmessage:Error from server (NotFound): secrets "mysecret" not found has:secrets "mysecret" not found [32mSuccessful (B[m{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"41a54903-59ac-4c0a-8aa4-a4ac6d1d3476","resourceVersion":"1238","creationTimestamp":"2023-03-17T23:15:51Z"}} [32mSuccessful (B[mmessage:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"41a54903-59ac-4c0a-8aa4-a4ac6d1d3476","resourceVersion":"1239","creationTimestamp":"2023-03-17T23:15:51Z"},"data":{"key1":"config1"}} has:uid [32mSuccessful (B[mmessage:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"41a54903-59ac-4c0a-8aa4-a4ac6d1d3476","resourceVersion":"1239","creationTimestamp":"2023-03-17T23:15:51Z"},"data":{"key1":"config1"}} has:config1 {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"41a54903-59ac-4c0a-8aa4-a4ac6d1d3476"}} [32mSuccessful (B[mmessage:Error from server (NotFound): configmaps "tester-update-cm" not found has:configmaps "tester-update-cm" not found +++ exit code: 0 Recording: run_kubectl_create_kustomization_directory_tests Running command: run_kubectl_create_kustomization_directory_tests +++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests ... skipping 25 lines ... +++ command: run_kubectl_create_validate_tests +++ [0317 23:15:53] Creating namespace namespace-1679094953-4339 namespace/namespace-1679094953-4339 created Context "test" modified. 
+++ [0317 23:15:53] Testing kubectl create --validate Successful message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo" has either:strict decoding error or:error validating data +++ [0317 23:15:53] Testing kubectl create --validate=true Successful message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo" has either:strict decoding error or:error validating data +++ [0317 23:15:54] Testing kubectl create --validate=false [32mSuccessful (B[mmessage:deployment.apps/invalid-nginx-deployment created has:deployment.apps/invalid-nginx-deployment created I0317 23:15:54.164323 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4" I0317 23:15:54.183775 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="test-events" I0317 23:15:54.186762 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-g6xp8" I0317 23:15:54.218645 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-l2qd7" I0317 23:15:54.218687 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-fmz4t" deployment.apps "invalid-nginx-deployment" deleted I0317 23:15:54.235911 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-p6dsq" +++ [0317 23:15:54] Testing kubectl create --validate=strict E0317 23:15:54.268864 23174 replica_set.go:544] sync "namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" failed with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found E0317 23:15:54.272578 23174 replica_set.go:544] sync "namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" failed with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found Successful message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo" has either:strict decoding error or:error validating data +++ [0317 23:15:54] Testing kubectl create --validate=warn Warning: unknown field "spec.baz" Warning: unknown field "spec.foo" [32mSuccessful (B[mmessage:deployment.apps/invalid-nginx-deployment created 
has:deployment.apps/invalid-nginx-deployment created I0317 23:15:54.691622 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4" I0317 23:15:54.714159 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-92mlg" I0317 23:15:54.734682 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-hrrss" I0317 23:15:54.734725 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-8j8qq" I0317 23:15:54.753524 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-nmmc8" deployment.apps "invalid-nginx-deployment" deleted +++ [0317 23:15:54] Testing kubectl create --validate=ignore E0317 23:15:54.819430 23174 replica_set.go:544] sync "namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" failed with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found [32mSuccessful (B[mmessage:deployment.apps/invalid-nginx-deployment created has:deployment.apps/invalid-nginx-deployment created I0317 23:15:54.906267 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4" I0317 23:15:54.927907 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-2bz8f" I0317 23:15:54.949645 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-cw4pc" I0317 23:15:54.949684 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-l5w27" deployment.apps "invalid-nginx-deployment" deleted I0317 23:15:54.970265 23174 event.go:307] "Event occurred" object="namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-cpqsg" +++ [0317 23:15:54] Testing kubectl create E0317 23:15:55.001038 23174 replica_set.go:544] sync "namespace-1679094953-4339/invalid-nginx-deployment-cbdccf466" failed 
with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found Successful message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo" has either:strict decoding error or:error validating data +++ [0317 23:15:55] Testing kubectl create --validate=foo [32mSuccessful (B[mmessage:error: invalid - validate option "foo"; must be one of: strict (or true), warn, ignore (or false) has:invalid - validate option "foo" +++ exit code: 0 Recording: run_convert_tests Running command: run_convert_tests +++ Running case: test-cmd.run_convert_tests ... skipping 50 lines ... securityContext: {} terminationGracePeriodSeconds: 30 status: {} has:apps/v1beta1 deployment.apps "nginx" deleted [32mSuccessful (B[mmessage:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mSuccessful (B[mmessage:nginx: has:nginx: +++ exit code: 0 Recording: run_kubectl_delete_allnamespaces_tests ... skipping 103 lines ... has:Timeout [32mSuccessful (B[mmessage:NAME READY STATUS RESTARTS AGE valid-pod 0/1 Pending 0 2s has:valid-pod [32mSuccessful (B[mmessage:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h) has:Invalid timeout value pod "valid-pod" deleted +++ exit code: 0 Recording: run_crd_tests Running command: run_crd_tests ... skipping 157 lines ... (B[mFlag --record has been deprecated, --record will be removed in the future foo.company.com/test patched [32mcrd.sh:296: Successful get foos/test {{.patched}}: value2 (B[mFlag --record has been deprecated, --record will be removed in the future foo.company.com/test patched [32mcrd.sh:298: Successful get foos/test {{.patched}}: <no value> (B[m+++ [0317 23:16:05] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge { "apiVersion": "company.com/v1", "kind": "Foo", "metadata": { "annotations": { "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true" ... skipping 228 lines ... 
(B[m[32mcrd.sh:519: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace/non-native-resources created bar.company.com/test created [32mcrd.sh:524: Successful get bars {{len .items}}: 1 (B[mnamespace "non-native-resources" deleted [32mcrd.sh:527: Successful get bars {{len .items}}: 0 (B[mError from server (NotFound): namespaces "non-native-resources" not found customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted I0317 23:16:25.602131 20078 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager I0317 23:16:25.615463 20078 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager I0317 23:16:25.640512 20078 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager I0317 23:16:25.782525 20078 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted ... skipping 15 lines ... +++ [0317 23:16:26] Testing recursive resources +++ [0317 23:16:26] Creating namespace namespace-1679094986-25922 namespace/namespace-1679094986-25922 created Context "test" modified. [32mgeneric-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0317 23:16:26.643165 20078 cacher.go:171] Terminating all watchers from cacher foos.company.com E0317 23:16:26.645080 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:26.808569 20078 cacher.go:171] Terminating all watchers from cacher bars.company.com E0317 23:16:26.810270 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:26.994675 20078 cacher.go:171] Terminating all watchers from cacher resources.mygroup.example.com E0317 23:16:26.996413 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:27.187175 20078 cacher.go:171] Terminating all watchers from cacher validfoos.company.com E0317 23:16:27.188609 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:pod/busybox0 created pod/busybox1 created error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox: (B[m[32mSuccessful (B[mmessage:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing 
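The busybox0/busybox1 creations and the "kind not set" failure above come from handing kubectl a directory tree rather than a single file; a hedged sketch of that shape (the exact flags in the test script may differ):
kubectl create -f hack/testdata/recursive/pod --recursive
# pod/busybox0 and pod/busybox1 are created; busybox-broken.yaml is rejected with
# "error validating data: kind not set" because its manifest spells the field "ind" instead of "kind".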
[32mgeneric-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0317 23:16:27.756455 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:27.756508 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:27.854274 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:27.854316 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:27.899928 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:27.899967 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[m[32mSuccessful (B[mmessage:pod/busybox0 replaced pod/busybox1 replaced error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0317 23:16:28.499683 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:28.499724 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:Name: busybox0 Namespace: namespace-1679094986-25922 Priority: 0 Node: <none> Labels: app=busybox0 ... skipping 158 lines ... 
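Every "Object 'Kind' is missing" message in this stretch quotes the same document; a minimal reconstruction of that broken manifest, inferred from the JSON echoed in the errors (values per the log, YAML layout assumed):
cat > busybox-broken.yaml <<'EOF'
apiVersion: v1
ind: Pod                      # "kind" is misspelled, so the decoder cannot identify the object type
metadata:
  labels:
    app: busybox2
  name: busybox2
spec:
  containers:
  - command: ["sleep", "3600"]
    image: busybox
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
EOF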
has:Object 'Kind' is missing [32mgeneric-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue: (B[m[32mSuccessful (B[mmessage:pod/busybox0 annotate pod/busybox1 annotate error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced: (B[m[32mSuccessful (B[mmessage:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox0 configured Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. pod/busybox1 configured error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false has:error validating data: kind not set [32mgeneric-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:busybox0:busybox1: [32mSuccessful (B[mmessage:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue: (B[m[32mSuccessful 
(B[mmessage:pod/busybox0 labeled pod/busybox1 labeled error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mpod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' [32mgeneric-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox: (B[m[32mSuccessful (B[mmessage:pod/busybox0 patched pod/busybox1 patched error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
pod "busybox0" force deleted pod "busybox1" force deleted error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}' has:Object 'Kind' is missing W0317 23:16:30.328684 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:30.328725 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:16:30.336985 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="non-native-resources" [32mgeneric-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0317 23:16:30.647051 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-f7rzp" W0317 23:16:30.651191 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:30.651241 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0317 23:16:30.738791 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-5mhg5" [32mgeneric-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[mW0317 23:16:30.944430 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:30.944471 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80 (B[m[32mgeneric-resources.sh:319: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80 (B[m[32mSuccessful 
(B[mmessage:horizontalpodautoscaler.autoscaling/busybox0 autoscaled horizontalpodautoscaler.autoscaling/busybox1 autoscaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing horizontalpodautoscaler.autoscaling "busybox0" deleted horizontalpodautoscaler.autoscaling "busybox1" deleted [32mgeneric-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[mW0317 23:16:31.649110 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:31.649154 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:16:31.739609 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679094986-25922/busybox0" clusterIPs=map[IPv4:10.0.0.206] I0317 23:16:31.795152 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679094986-25922/busybox1" clusterIPs=map[IPv4:10.0.0.75] [32mgeneric-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[m[32mgeneric-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 (B[m[32mSuccessful (B[mmessage:service/busybox0 exposed service/busybox1 exposed error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1 (B[m[32mgeneric-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1 (B[mI0317 23:16:32.280941 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-7vz89" I0317 23:16:32.340318 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-rcthq" [32mgeneric-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2 (B[m[32mgeneric-resources.sh:347: Successful get rc busybox1 
{{.spec.replicas}}: 2 (B[m[32mSuccessful (B[mmessage:replicationcontroller/busybox0 scaled replicationcontroller/busybox1 scaled error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mgeneric-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mSuccessful (B[mmessage:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mgeneric-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx1-deployment created I0317 23:16:33.030141 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx1-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-69c599568 to 2" error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0317 23:16:33.077362 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-bxrtk" deployment.apps/nginx0-deployment created I0317 23:16:33.104361 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx0-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-5944978c6f to 2" I0317 23:16:33.104394 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-qbfkp" I0317 23:16:33.129997 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx0-deployment-5944978c6f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-dlpcr" I0317 23:16:33.147522 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/nginx0-deployment-5944978c6f" fieldPath="" 
kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-m5zqs" [32mgeneric-resources.sh:365: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment: (B[m[32mgeneric-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9: (B[m[32mgeneric-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9: (B[m[32mSuccessful (B[mmessage:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1) deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1) error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing deployment.apps/nginx1-deployment paused deployment.apps/nginx0-deployment paused [32mgeneric-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true: (B[m[32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing deployment.apps/nginx1-deployment resumed deployment.apps/nginx0-deployment resumed [32mgeneric-resources.sh:384: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: <no value>:<no value>: (B[m[32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing W0317 23:16:34.802504 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:34.802542 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available... 
timed out waiting for the condition unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Waiting for deployment "nginx1-deployment" rollout to finish [32mSuccessful (B[mmessage:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available... timed out waiting for the condition unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing W0317 23:16:35.330086 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:35.330127 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:36.589820 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:36.589868 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available... Waiting for deployment "nginx0-deployment" rollout to finish: 0 of 2 updated replicas are available... timed out waiting for the condition timed out waiting for the condition unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' ... skipping 18 lines ... 
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx0-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx1-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' W0317 23:16:37.440389 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:37.440428 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:411: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0317 23:16:38.523483 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-qplg2" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0317 23:16:38.571508 23174 event.go:307] "Event occurred" object="namespace-1679094986-25922/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-c5s94" [32mgeneric-resources.sh:415: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
(B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox0" pausing is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox1" pausing is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox0" resuming is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox1" resuming is not supported Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' +++ exit code: 0 Recording: run_namespace_tests Running command: run_namespace_tests +++ Running case: test-cmd.run_namespace_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_namespace_tests +++ [0317 23:16:40] Testing kubectl(v1:namespaces) [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created (dry run) namespace/my-namespace created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1504: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mquery for namespaces had limit param query for resourcequotas had limit param query for limitranges had limit param ... skipping 136 lines ... 
I0317 23:16:40.722914 41994 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679094986-25922/limitranges?limit=500 200 OK in 1 milliseconds (B[mnamespace "my-namespace" deleted I0317 23:16:43.784831 23174 shared_informer.go:311] Waiting for caches to sync for resource quota I0317 23:16:43.784894 23174 shared_informer.go:318] Caches are synced for resource quota I0317 23:16:44.003235 23174 shared_informer.go:311] Waiting for caches to sync for garbage collector I0317 23:16:44.003285 23174 shared_informer.go:318] Caches are synced for garbage collector W0317 23:16:44.148852 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:44.148932 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:44.808984 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:44.809026 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:45.420204 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:45.420240 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:16:45.739531 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:45.739579 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:16:46.103812 23174 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679094986-25922/busybox0" I0317 23:16:46.119511 23174 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679094986-25922/busybox1" namespace/my-namespace condition met [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1515: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[m[32mSuccessful (B[mmessage:Warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted ... skipping 34 lines ... 
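The "namespace/my-namespace condition met" line above reads like the output of a kubectl wait call that blocks until the namespace's finalizers finish; a hedged guess at that call shape (flags and timeout assumed):
kubectl wait --for=delete namespace/my-namespace --timeout=60s
# returns once the namespace object is actually gone, which would explain why the
# follow-up create of my-namespace succeeds immediately afterwards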
namespace "namespace-1679094956-22424" deleted namespace "namespace-1679094956-26797" deleted namespace "namespace-1679094957-24770" deleted namespace "namespace-1679094959-5663" deleted namespace "namespace-1679094960-18424" deleted namespace "namespace-1679094986-25922" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:Warning: deleting cluster-scoped resources [32mSuccessful (B[mmessage:Warning: deleting cluster-scoped resources, not scoped to the provided namespace namespace "kube-node-lease" deleted namespace "my-namespace" deleted namespace "namespace-1679094765-25922" deleted ... skipping 32 lines ... namespace "namespace-1679094956-22424" deleted namespace "namespace-1679094956-26797" deleted namespace "namespace-1679094957-24770" deleted namespace "namespace-1679094959-5663" deleted namespace "namespace-1679094960-18424" deleted namespace "namespace-1679094986-25922" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted namespace/quotas created [32mcore.sh:1522: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1523: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name "test-quota" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created (dry run) resourcequota/test-quota created (server dry run) ... skipping 15 lines ... [32mcore.sh:1548: Successful get namespaces/other {{.metadata.name}}: other (B[m[32mcore.sh:1552: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/valid-pod created [32mcore.sh:1556: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:1558: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mSuccessful (B[mmessage:error: a resource cannot be retrieved by name across all namespaces has:a resource cannot be retrieved by name across all namespaces [32mcore.sh:1565: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:1569: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace "other" deleted ... skipping 19 lines ... 
I0317 23:16:58.113152 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094823-29834" I0317 23:16:58.177930 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094824-28756" I0317 23:16:58.287055 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094826-20310" I0317 23:16:58.314274 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094878-30124" I0317 23:16:58.449481 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094826-11123" I0317 23:16:58.483383 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094886-2426" W0317 23:16:58.590295 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:16:58.590340 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:16:58.655129 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094887-10844" I0317 23:16:58.776846 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094904-22047" I0317 23:16:58.909527 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094924-20947" I0317 23:16:59.042075 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094925-11584" I0317 23:16:59.143918 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094888-21139" I0317 23:16:59.206803 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094933-20136" ... skipping 5 lines ... I0317 23:16:59.703437 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094957-24770" I0317 23:16:59.753790 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094953-4339" I0317 23:16:59.778285 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094959-5663" I0317 23:16:59.811569 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094960-18424" I0317 23:16:59.904335 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="quotas" I0317 23:17:00.013136 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679094986-25922" W0317 23:17:00.571998 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:00.572039 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource +++ exit code: 0 Recording: run_secrets_test Running command: run_secrets_test +++ Running case: test-cmd.run_secrets_test +++ working dir: /home/prow/go/src/k8s.io/kubernetes ... skipping 40 lines ... 
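The run_secrets_test case that follows exercises explicitly typed secrets; a hedged sketch of the kinds of commands involved (literal values and any flags beyond the names in the log are assumptions):
kubectl create secret generic test-secret --namespace=test-secrets --type=test-type --from-literal=key1=value1
kubectl create secret docker-registry test-secret --namespace=test-secrets \
  --docker-username=dockeruser --docker-password=dockerpass --docker-email=user@example.com
# the docker-registry form is what produces the kubernetes.io/dockerconfigjson type checked below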
name: test has not:example.com [32mcore.sh:831: Successful get namespaces {{range.items}}{{ if eq .metadata.name "test-secrets" }}found{{end}}{{end}}:: : (B[mnamespace/test-secrets created [32mcore.sh:835: Successful get namespaces/test-secrets {{.metadata.name}}: test-secrets (B[m[32mcore.sh:839: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0317 23:17:01.788432 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:01.788474 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource secret/test-secret created [32mcore.sh:843: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret (B[m[32mcore.sh:844: Successful get secret/test-secret --namespace=test-secrets {{.type}}: test-type (B[mquery for secrets had limit param query for secrets had user-specified limit param [32mSuccessful describe secrets verbose logs: ... skipping 3 lines ... I0317 23:17:02.133784 42618 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-secrets/secrets/test-secret 200 OK in 1 milliseconds (B[msecret "test-secret" deleted [32mcore.sh:856: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[msecret/test-secret created [32mcore.sh:860: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret (B[m[32mcore.sh:861: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/dockerconfigjson (B[mW0317 23:17:02.706748 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:02.706786 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource secret "test-secret" deleted [32mcore.sh:871: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[msecret/test-secret created [32mcore.sh:875: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret (B[m[32mcore.sh:876: Successful get secret/test-secret --namespace=test-secrets {{.type}}: kubernetes.io/dockerconfigjson (B[msecret "test-secret" deleted ... skipping 67 lines ... +++ command: run_client_config_tests +++ [0317 23:17:17] Creating namespace namespace-1679095037-17076 namespace/namespace-1679095037-17076 created Context "test" modified. 
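The client-config failures exercised next all come from pointing kubectl at kubeconfig pieces that do not exist; a sketch of invocations that produce those messages (the subcommand and flag values are assumptions):
kubectl get pods --kubeconfig=missing          # error: stat missing: no such file or directory
kubectl get pods --context=missing-context     # context was not found for specified context: missing-context
kubectl get pods --cluster=missing-cluster     # error: no server found for cluster "missing-cluster"
kubectl get pods --user=missing-user           # error: auth info "missing-user" does not exist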
+++ [0317 23:17:17] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 57 lines ...
Labels:                        <none>
Annotations:                   <none>
Schedule:                      59 23 31 2 *
Concurrency Policy:            Allow
Suspend:                       False
Successful Job History Limit:  3
Failed Job History Limit:      1
Starting Deadline Seconds:     <unset>
Selector:                      <unset>
Parallelism:                   <unset>
Completions:                   <unset>
Pod Template:
  Labels:  <none>
... skipping 57 lines ...
Annotations:      batch.kubernetes.io/job-tracking:
                  cronjob.kubernetes.io/instantiate: manual
Parallelism:      1
Completions:      1
Completion Mode:  NonIndexed
Start Time:       Fri, 17 Mar 2023 23:17:25 +0000
Pods Statuses:    1 Active (0 Ready) / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  batch.kubernetes.io/controller-uid=24d729a3-71aa-4388-9948-8bf62c02b90d
           batch.kubernetes.io/job-name=test-job
           controller-uid=24d729a3-71aa-4388-9948-8bf62c02b90d
           job-name=test-job
  Containers:
... skipping 52 lines ...
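A sketch of how the client-config failure modes above can be reproduced by hand; the file, context, cluster, and user names are placeholders, and each command is expected to fail with the corresponding message shown in the log.

kubectl get pods --kubeconfig=missing           # error: stat missing: no such file or directory
kubectl get pods --context=missing-context      # context was not found for specified context
kubectl get pods --cluster=missing-cluster      # no server found for cluster "missing-cluster"
kubectl get pods --user=missing-user            # auth info "missing-user" does not exist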
I0317 23:17:32.013207 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/test-job-pi job.batch/test-job-pi created I0317 23:17:32.035236 23174 event.go:307] "Event occurred" object="namespace-1679095051-14268/test-job-pi" fieldPath="" kind="Job" apiVersion="batch/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-job-pi-jfgc4" I0317 23:17:32.035277 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/test-job-pi I0317 23:17:32.058212 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/test-job-pi [32mcreate.sh:100: Successful get job test-job-pi {{(index .spec.template.spec.containers 0).image}}: registry.k8s.io/perl (B[mW0317 23:17:32.106194 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:32.106232 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource job.batch "test-job-pi" deleted I0317 23:17:32.163129 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/test-job-pi cronjob.batch/test-pi created W0317 23:17:32.315281 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:32.315321 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:17:32.328445 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/my-pi job.batch/my-pi created I0317 23:17:32.367883 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/my-pi I0317 23:17:32.368197 23174 event.go:307] "Event occurred" object="namespace-1679095051-14268/my-pi" fieldPath="" kind="Job" apiVersion="batch/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: my-pi-s87jw" I0317 23:17:32.392229 23174 job_controller.go:523] enqueueing job namespace-1679095051-14268/my-pi [32mSuccessful ... skipping 16 lines ... 
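A rough sketch of the two job-creation paths exercised above: a standalone Job created directly, and a Job instantiated from an existing CronJob. The names and image follow the log; the perl arguments are illustrative.

# One-off Job (as asserted by create.sh:100 above).
kubectl create job test-job-pi --image=registry.k8s.io/perl -- perl -Mbignum=bpi -wle 'print bpi(20)'
# Job instantiated from a CronJob.
kubectl create job my-pi --from=cronjob/test-pi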
[32mcore.sh:1631: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: (B[mI0317 23:17:33.105836 20078 controller.go:624] quota admission added evaluator for: podtemplates podtemplate/nginx created [32mcore.sh:1635: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx: (B[mNAME CONTAINERS IMAGES POD LABELS nginx nginx nginx name=nginx W0317 23:17:33.364586 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:17:33.364628 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1643: Successful get podtemplates {{range.items}}{{.metadata.name}}:{{end}}: nginx: (B[mquery for podtemplates had limit param query for events had limit param query for podtemplates had user-specified limit param [32mSuccessful describe podtemplates verbose logs: I0317 23:17:33.491732 44656 loader.go:373] Config loaded from file: /tmp/tmp.vT1Q3sqC26/.kube/config ... skipping 371 lines ... type: ClusterIP status: loadBalancer: {} [32mSuccessful (B[mmessage:kubectl-create kubectl-set has:kubectl-set error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1034: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend: (B[mservice/redis-master selector updated [32mSuccessful (B[mmessage:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again has:Conflict [32mcore.sh:1047: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master: (B[mservice "redis-master" deleted I0317 23:17:36.452500 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="test-jobs" [32mcore.sh:1054: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[m[32mcore.sh:1058: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: ... skipping 305 lines ... 
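The "selector updated" line and the Conflict error above come from kubectl set selector against a live Service; a minimal sketch (the selector value is illustrative). The Conflict is the standard optimistic-concurrency error returned when the object changed between read and write.

# Replace the selector on an existing Service.
kubectl set selector service redis-master role=padawan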
message:daemonset.apps/bind
REVISION  CHANGE-CAUSE
2         kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
3         kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
has:3 kubectl apply
Successful
message:error: unable to find specified revision 1000000 in history
has:unable to find specified revision
apps.sh:122: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0:
apps.sh:123: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
daemonset.apps/bind rolled back
apps.sh:126: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:latest:
apps.sh:127: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
apps.sh:128: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
W0317 23:17:45.758662 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 23:17:45.758701 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:daemonset.apps/bind
REVISION  CHANGE-CAUSE
3         kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
4         kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
has:daemonset.apps/bind
... skipping 29 lines ...
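A sketch of the rollout-history/undo flow for the DaemonSet above; the revision numbers are examples, and an out-of-range revision fails exactly as shown in the log.

kubectl rollout history daemonset/bind
kubectl rollout undo daemonset/bind --to-revision=1000000   # error: unable to find specified revision
kubectl rollout undo daemonset/bind --to-revision=2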
+++ [0317 23:17:46] Testing kubectl(v1:replicationcontrollers) [32mcore.sh:1205: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/frontend created I0317 23:17:46.429953 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-tzmsr" I0317 23:17:46.453726 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-dljtq" I0317 23:17:46.453843 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-5wvhr" E0317 23:17:46.496592 23174 replica_set.go:544] sync "namespace-1679095065-24481/frontend" failed with replicationcontrollers "frontend" not found replicationcontroller "frontend" deleted E0317 23:17:46.501416 23174 replica_set.go:544] sync "namespace-1679095065-24481/frontend" failed with replicationcontrollers "frontend" not found [32mcore.sh:1210: Successful get pods -l name=frontend {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mcore.sh:1214: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/frontend created I0317 23:17:46.927085 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-f44lx" I0317 23:17:46.951816 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-bsnvh" I0317 23:17:46.952007 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-x8ws4" ... skipping 11 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 18 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 12 lines ... 
Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 27 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 11 lines ... Namespace: namespace-1679095065-24481 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 25 lines ... 
(B[m[32mcore.sh:1240: Successful get rc frontend {{.spec.replicas}}: 3 (B[mreplicationcontroller/frontend scaled E0317 23:17:48.025945 23174 replica_set.go:220] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1679095065-24481 414250b6-43a7-4346-bf24-8525b2af724c 2266 2 2023-03-17 23:17:46 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kubectl Update v1 <nil> FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {kubectl-create Update v1 2023-03-17 23:17:46 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:selector":{},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update v1 2023-03-17 23:17:47 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}] []} [] [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00176f218 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} I0317 23:17:48.062785 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-bsnvh" [32mcore.sh:1244: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1248: Successful get rc frontend {{.spec.replicas}}: 2 (B[merror: Expected replicas to be 3, was 2 [32mcore.sh:1252: Successful get rc frontend {{.spec.replicas}}: 2 (B[m[32mcore.sh:1256: Successful get rc frontend {{.spec.replicas}}: 2 (B[mreplicationcontroller/frontend scaled I0317 23:17:48.521081 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lbkdb" [32mcore.sh:1260: Successful get rc frontend {{.spec.replicas}}: 3 (B[m[32mcore.sh:1264: Successful get rc frontend 
{{.spec.replicas}}: 3 ... skipping 70 lines ... I0317 23:17:51.728146 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679095065-24481/expose-test-deployment" clusterIPs=map[IPv4:10.0.0.34] [32mSuccessful (B[mmessage:service/expose-test-deployment exposed has:service/expose-test-deployment exposed service "expose-test-deployment" deleted [32mSuccessful (B[mmessage:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed has:invalid deployment: no selectors deployment.apps/nginx-deployment created I0317 23:17:52.149277 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7df65dc9f4 to 3" I0317 23:17:52.187717 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-th2tw" I0317 23:17:52.211241 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-gsjrd" I0317 23:17:52.211693 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-lcb6h" ... skipping 24 lines ... (B[mpod "valid-pod" deleted service "frontend" deleted service "frontend-2" deleted service "frontend-3" deleted service "frontend-4" deleted [32mSuccessful (B[mmessage:error: cannot expose a Node has:cannot expose [32mSuccessful (B[mmessage:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters has:metadata.name: Invalid value I0317 23:17:54.549241 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679095065-24481/kubernetes-serve-hostname-testing-sixty-three-characters-in-len" clusterIPs=map[IPv4:10.0.0.48] [32mSuccessful ... skipping 32 lines ... (B[mhorizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1436: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 70 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted horizontalpodautoscaler.autoscaling/frontend autoscaled [32mcore.sh:1440: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80 (B[mhorizontalpodautoscaler.autoscaling "frontend" deleted error: required flag(s) "max" not set replicationcontroller "frontend" deleted [32mcore.sh:1449: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mapiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null ... skipping 24 lines ... 
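A sketch of the expose and autoscale commands whose error paths show up above; ports and thresholds are illustrative.

# Expose a controller as a Service; objects without selectors cannot be exposed, and Service
# names longer than 63 characters are rejected by validation, as seen above.
kubectl expose rc frontend --port=80 --target-port=8000 --name=frontend-service
# Autoscale requires --max; omitting it yields the 'required flag(s) "max" not set' error above.
kubectl autoscale rc frontend --min=1 --max=2 --cpu-percent=70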
limits: cpu: 300m requests: cpu: 300m terminationGracePeriodSeconds: 0 status: {} Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found deployment.apps/nginx-deployment-resources created I0317 23:17:57.830554 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-5f79767bf9 to 3" I0317 23:17:57.857052 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-c4wh2" I0317 23:17:57.883126 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-66mpn" I0317 23:17:57.883172 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-9mrnc" [32mcore.sh:1455: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources: (B[m[32mcore.sh:1456: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd: (B[m[32mcore.sh:1457: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl: (B[mdeployment.apps/nginx-deployment-resources resource requirements updated I0317 23:17:58.195656 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-77d775b4f9 to 1" I0317 23:17:58.220655 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-77d775b4f9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-77d775b4f9-klm8q" [32mcore.sh:1460: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m: (B[m[32mcore.sh:1461: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m: (B[merror: unable to find container named redis deployment.apps/nginx-deployment-resources resource requirements updated I0317 23:17:58.569517 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-5f79767bf9 to 2 from 3" [32mcore.sh:1466: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: (B[mI0317 23:17:58.642312 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set 
nginx-deployment-resources-688f8b78b5 to 1 from 0" I0317 23:17:58.653320 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-5f79767bf9-c4wh2" I0317 23:17:58.664676 23174 event.go:307] "Event occurred" object="namespace-1679095065-24481/nginx-deployment-resources-688f8b78b5" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-688f8b78b5-xssxf" ... skipping 155 lines ... status: "True" type: Progressing observedGeneration: 4 replicas: 4 unavailableReplicas: 4 updatedReplicas: 1 error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' [32mcore.sh:1477: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m: (B[m[32mcore.sh:1478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m: (B[m[32mcore.sh:1479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m: ... skipping 46 lines ... pod-template-hash=859689d794 Annotations: deployment.kubernetes.io/desired-replicas: 1 deployment.kubernetes.io/max-replicas: 2 deployment.kubernetes.io/revision: 1 Controlled By: Deployment/test-nginx-apps Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=test-nginx-apps pod-template-hash=859689d794 Containers: nginx: Image: registry.k8s.io/nginx:test-cmd ... skipping 67 lines ... 
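A sketch of the kubectl set resources updates behind the limit/request changes asserted above; the cpu values mirror the core.sh checks, while the exact flag combinations are illustrative.

# Update limits/requests on a deployment; naming a container that is not in the pod template
# fails with "unable to find container named ...", and --local requires -f, as seen above.
kubectl set resources deployment nginx-deployment-resources --limits=cpu=200m --requests=cpu=100m
kubectl set resources deployment nginx-deployment-resources --containers=perl --limits=cpu=300m --requests=cpu=300m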
(B[mdeployment.apps/nginx-deployment created I0317 23:18:02.391121 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7df65dc9f4 to 3" I0317 23:18:02.437383 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-6td47" I0317 23:18:02.464936 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-r9hv9" I0317 23:18:02.464984 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-vvfnb" [32mapps.sh:280: Successful get rs {{range.items}}{{.spec.replicas}}{{end}}: 3 (B[mW0317 23:18:02.505680 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:02.505726 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource deployment.apps "nginx-deployment" deleted [32mapps.sh:284: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:288: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:289: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx-deployment created I0317 23:18:02.912209 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-55d75fc84b to 1" ... skipping 44 lines ... 
apps.sh:340: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
    Image:  registry.k8s.io/nginx:test-cmd
deployment.apps/nginx rolled back (server dry run)
apps.sh:344: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx rolled back
apps.sh:348: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
error: unable to find specified revision 1000000 in history
apps.sh:351: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
deployment.apps/nginx rolled back
apps.sh:355: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx paused
error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again
error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first)
deployment.apps/nginx resumed
deployment.apps/nginx rolled back
    deployment.kubernetes.io/revision-history: 1,3
error: desired revision (3) is different from the running revision (5)
deployment.apps/nginx restarted
I0317 23:18:09.258834 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-6b9cd9ccf6 to 0 from 1"
I0317 23:18:09.329596 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-6b68fc478 to 1 from 0"
I0317 23:18:09.340138 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-6b9cd9ccf6" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-6b9cd9ccf6-xst9k"
I0317 23:18:09.353207 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-6b68fc478" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-6b68fc478-9nbc2"
Successful
... skipping 61 lines ...
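The paused-deployment errors above follow the usual pause/resume/restart ordering; a compact sketch of that sequence (the target revision is an example):

kubectl rollout pause deployment/nginx
kubectl rollout undo deployment/nginx        # fails: cannot rollback a paused deployment
kubectl rollout restart deployment/nginx     # fails: can't restart paused deployment
kubectl rollout resume deployment/nginx
kubectl rollout undo deployment/nginx --to-revision=3
kubectl rollout restart deployment/nginx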
deployment.apps/nginx2 created I0317 23:18:10.664059 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx2-5744c8b44d to 3" I0317 23:18:10.690540 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-bsfwh" I0317 23:18:10.743027 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-rvn62" I0317 23:18:10.743073 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-ln9ck" deployment.apps "nginx2" deleted E0317 23:18:10.790593 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx2-5744c8b44d" failed with replicasets.apps "nginx2-5744c8b44d" not found deployment.apps "nginx" deleted [32mapps.sh:389: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mdeployment.apps/nginx-deployment created I0317 23:18:11.251829 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57bf7fbc68 to 3" I0317 23:18:11.273425 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-9jln8" I0317 23:18:11.294723 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-2gtgg" ... skipping 8 lines ... 
(B[mI0317 23:18:11.831547 23174 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679095065-24481/frontend" deployment.apps/nginx-deployment image updated I0317 23:18:11.918379 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6444b54576 to 1" I0317 23:18:11.944977 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-6444b54576" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6444b54576-zc4b5" [32mapps.sh:402: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9: (B[m[32mapps.sh:403: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl: (B[merror: unable to find container named "redis" deployment.apps/nginx-deployment image updated [32mapps.sh:408: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd: (B[m[32mapps.sh:409: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl: (B[mdeployment.apps/nginx-deployment image updated [32mapps.sh:412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9: (B[m[32mapps.sh:413: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl: ... skipping 57 lines ... Warning: key username transferred to USERNAME deployment.apps/nginx-deployment env updated I0317 23:18:15.896096 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-d588bb564 to 0 from 1" deployment.apps/nginx-deployment env updated I0317 23:18:15.931927 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-ffc86458c to 1" [32mSuccessful (B[mmessage:error: standard input cannot be used for multiple arguments has:standard input cannot be used for multiple arguments deployment.apps "nginx-deployment" deleted I0317 23:18:16.087236 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-d588bb564" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-d588bb564-x6v5l" E0317 23:18:16.109877 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-694d45dfd5" failed with replicasets.apps "nginx-deployment-694d45dfd5" not found I0317 23:18:16.168840 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-ffc86458c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-ffc86458c-8qbtd" configmap "test-set-env-config" deleted E0317 23:18:16.201557 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-57bf7fbc68" failed with Operation cannot be 
fulfilled on replicasets.apps "nginx-deployment-57bf7fbc68": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679095079-23217/nginx-deployment-57bf7fbc68, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: d6c806c4-4374-4cac-aa39-20b9fbc2c828, UID in object meta: secret "test-set-env-secret" deleted [32mapps.sh:474: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: (B[mE0317 23:18:16.447238 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-d588bb564" failed with replicasets.apps "nginx-deployment-d588bb564" not found E0317 23:18:16.547066 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-694d45dfd5" failed with replicasets.apps "nginx-deployment-694d45dfd5" not found deployment.apps/nginx-deployment created I0317 23:18:16.580696 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57bf7fbc68 to 3" E0317 23:18:16.627961 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-ffc86458c" failed with replicasets.apps "nginx-deployment-ffc86458c" not found [32mapps.sh:477: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment: (B[mI0317 23:18:16.659947 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-djk4v" I0317 23:18:16.709364 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-zjzrc" [32mapps.sh:478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd: (B[mI0317 23:18:16.809668 23174 event.go:307] "Event occurred" object="namespace-1679095079-23217/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-92rvf" [32mapps.sh:479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl: ... skipping 191 lines ... Environment: <none> Mounts: <none> Volumes: <none> has:registry.k8s.io/perl deployment.apps "nginx-deployment" deleted +++ exit code: 0 E0317 23:18:17.462083 23174 replica_set.go:544] sync "namespace-1679095079-23217/nginx-deployment-6444b54576" failed with Operation cannot be fulfilled on replicasets.apps "nginx-deployment-6444b54576": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679095079-23217/nginx-deployment-6444b54576, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: cdb7368c-e96d-4254-95ec-5e05ccb21875, UID in object meta: Recording: run_rs_tests Running command: run_rs_tests +++ Running case: test-cmd.run_rs_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_rs_tests ... skipping 4 lines ... 
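A sketch of the image and env updates driving the ReplicaSet churn above; the container, ConfigMap, and Secret names follow the log, but treat the exact invocations as illustrative.

# Update one container's image; naming a container that is not in the pod template fails
# with 'unable to find container named "redis"', as seen above.
kubectl set image deployment/nginx-deployment nginx=registry.k8s.io/nginx:1.7.9
# Inject environment variables from a ConfigMap and a Secret.
kubectl set env deployment/nginx-deployment --from=configmap/test-set-env-config
kubectl set env deployment/nginx-deployment --from=secret/test-set-env-secret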
[32mapps.sh:645: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicaset.apps/frontend created +++ [0317 23:18:17] Deleting rs I0317 23:18:17.976997 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-nqcm6" I0317 23:18:18.004675 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-nb2gn" I0317 23:18:18.004717 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-bz2c7" E0317 23:18:18.077932 23174 replica_set.go:544] sync "namespace-1679095097-6044/frontend" failed with replicasets.apps "frontend" not found replicaset.apps "frontend" deleted E0317 23:18:18.082539 23174 replica_set.go:544] sync "namespace-1679095097-6044/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:651: Successful get pods -l tier=frontend {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:655: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0317 23:18:18.369412 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:18.369463 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicaset.apps/frontend created I0317 23:18:18.460253 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-5bs4j" I0317 23:18:18.481190 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-rbhqm" I0317 23:18:18.481228 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-hpl9h" [32mapps.sh:659: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[m+++ [0317 23:18:18] Deleting rs replicaset.apps "frontend" deleted E0317 23:18:18.691129 23174 replica_set.go:544] sync "namespace-1679095097-6044/frontend" failed with Operation cannot be fulfilled on replicasets.apps "frontend": the object has been modified; please apply your changes to the latest version and try again E0317 23:18:18.817008 23174 replica_set.go:544] sync "namespace-1679095097-6044/frontend" failed with replicasets.apps "frontend" not found [32mapps.sh:663: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mapps.sh:665: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis: (B[mpod "frontend-5bs4j" deleted pod "frontend-hpl9h" deleted pod "frontend-rbhqm" deleted [32mapps.sh:668: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: ... 
skipping 16 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 18 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 12 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 25 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 17 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 11 lines ... Namespace: namespace-1679095097-6044 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v3 ... skipping 163 lines ... 
I0317 23:18:22.725376 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/scale-3" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set scale-3-76b9689797 to 3 from 1" I0317 23:18:22.756011 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/scale-3-76b9689797" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: scale-3-76b9689797-ptnxl" I0317 23:18:22.803322 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/scale-3-76b9689797" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: scale-3-76b9689797-vqsd5" [32mapps.sh:730: Successful get deploy scale-1 {{.spec.replicas}}: 3 (B[m[32mapps.sh:731: Successful get deploy scale-2 {{.spec.replicas}}: 3 (B[m[32mapps.sh:732: Successful get deploy scale-3 {{.spec.replicas}}: 3 (B[mW0317 23:18:22.992136 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:22.992177 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicaset.apps "frontend" deleted deployment.apps "scale-1" deleted deployment.apps "scale-2" deleted deployment.apps "scale-3" deleted E0317 23:18:23.339349 23174 gc_controller.go:156] failed to get node : node "" not found E0317 23:18:23.339384 23174 gc_controller.go:156] failed to get node : node "" not found I0317 23:18:23.339407 23174 gc_controller.go:337] "PodGC is force deleting Pod" pod="namespace-1679095097-6044/scale-2-76b9689797-hcnkz" E0317 23:18:23.360052 23174 gc_controller.go:305] pods "scale-2-76b9689797-hcnkz" not found I0317 23:18:23.360088 23174 gc_controller.go:337] "PodGC is force deleting Pod" pod="namespace-1679095097-6044/scale-2-76b9689797-9927k" E0317 23:18:23.370406 23174 gc_controller.go:305] pods "scale-2-76b9689797-9927k" not found replicaset.apps/frontend created I0317 23:18:23.514757 23174 event.go:307] "Event occurred" object="namespace-1679095097-6044/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-kk6jl" ... skipping 50 lines ... horizontalpodautoscaler.autoscaling/frontend autoscaled [32mapps.sh:808: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80 (B[m[32mSuccessful (B[mmessage:kubectl-autoscale has:kubectl-autoscale horizontalpodautoscaler.autoscaling "frontend" deleted error: required flag(s) "max" not set replicaset.apps "frontend" deleted +++ exit code: 0 Recording: run_stateful_set_tests Running command: run_stateful_set_tests +++ Running case: test-cmd.run_stateful_set_tests ... skipping 265 lines ... 
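A sketch of scaling several objects in one call and autoscaling a ReplicaSet, matching the checks above; replica counts come from the log, everything else is illustrative.

# Scale three deployments at once (apps.sh:730-732 assert .spec.replicas afterwards).
kubectl scale deployment scale-1 scale-2 scale-3 --replicas=3
# Autoscale a ReplicaSet; --max is mandatory, hence the error above when it is omitted.
kubectl autoscale rs frontend --min=2 --max=3 --cpu-percent=80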
(B[mmessage:statefulset.apps/nginx REVISION CHANGE-CAUSE 2 kubectl apply --filename=hack/testdata/rollingupdate-statefulset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true 3 kubectl apply --filename=hack/testdata/rollingupdate-statefulset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true has:3 kubectl apply [32mSuccessful (B[mmessage:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:570: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.7: (B[m[32mapps.sh:571: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:574: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.8: (B[m[32mapps.sh:575: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/pause:2.0: ... skipping 87 lines ... Name: mock Namespace: namespace-1679095112-22630 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: registry.k8s.io/pause:3.9 Port: 9949/TCP ... skipping 61 lines ... Name: mock Namespace: namespace-1679095112-22630 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: registry.k8s.io/pause:3.9 Port: 9949/TCP ... skipping 32 lines ... (B[m[32mgeneric-resources.sh:64: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mI0317 23:18:37.831567 20078 alloc.go:330] "allocated clusterIPs" service="namespace-1679095112-22630/mock" clusterIPs=map[IPv4:10.0.0.44] service/mock created replicationcontroller/mock created I0317 23:18:37.897345 23174 event.go:307] "Event occurred" object="namespace-1679095112-22630/mock" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock-vc2sz" [32mgeneric-resources.sh:72: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: mock: (B[mW0317 23:18:38.004618 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:38.004680 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:80: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: mock: (B[mNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/mock ClusterIP 10.0.0.44 <none> 99/TCP 1s NAME DESIRED CURRENT READY AGE replicationcontroller/mock 1 1 0 1s ... skipping 17 lines ... Name: mock Namespace: namespace-1679095112-22630 Selector: app=mock Labels: app=mock Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: registry.k8s.io/pause:3.9 Port: 9949/TCP ... skipping 42 lines ... 
Namespace: namespace-1679095112-22630 Selector: app=mock Labels: app=mock status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock Containers: mock-container: Image: registry.k8s.io/pause:3.9 Port: 9949/TCP ... skipping 11 lines ... Namespace: namespace-1679095112-22630 Selector: app=mock2 Labels: app=mock2 status=replaced Annotations: <none> Replicas: 1 current / 1 desired Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=mock2 Containers: mock-container: Image: registry.k8s.io/pause:3.9 Port: 9949/TCP ... skipping 115 lines ... +++ [0317 23:18:45] Creating namespace namespace-1679095125-8138 namespace/namespace-1679095125-8138 created Context "test" modified. +++ [0317 23:18:45] Testing persistent volumes [32mstorage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created E0317 23:18:46.101683 23174 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[mpersistentvolume "pv0001" deleted persistentvolume/pv0002 created E0317 23:18:46.613757 23174 pv_protection_controller.go:113] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002: (B[mpersistentvolume "pv0002" deleted persistentvolume/pv0003 created [32mstorage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003: (B[mquery for persistentvolumes had limit param query for events had limit param ... skipping 4 lines ... I0317 23:18:47.247812 54594 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes?limit=500 200 OK in 1 milliseconds I0317 23:18:47.250303 54594 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes/pv0003 200 OK in 1 milliseconds I0317 23:18:47.262249 54594 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.name%3Dpv0003%2CinvolvedObject.namespace%3D%2CinvolvedObject.kind%3DPersistentVolume%2CinvolvedObject.uid%3De962dcf6-5e17-4ea1-8985-8cb34b530a98&limit=500 200 OK in 11 milliseconds (B[mpersistentvolume "pv0003" deleted [32mstorage.sh:44: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: (B[mpersistentvolume/pv0001 created E0317 23:18:47.847091 23174 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again [32mstorage.sh:47: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001: (B[m[32mSuccessful (B[mmessage:Warning: deleting cluster-scoped resources, not scoped to the provided namespace persistentvolume "pv0001" deleted has:Warning: deleting cluster-scoped resources [32mSuccessful ... skipping 10 lines ... +++ command: run_persistent_volume_claims_tests +++ [0317 23:18:48] Creating namespace namespace-1679095128-10443 namespace/namespace-1679095128-10443 created Context "test" modified. 
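The PersistentVolume churn above (create, list with a limit param, delete) can be reproduced with a minimal manifest; the hostPath and size below are purely illustrative.

kubectl create -f - <<'EOF'
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0001
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /tmp/pv0001
EOF
kubectl get pv pv0001 -o template --template='{{.metadata.name}}'
kubectl delete pv pv0001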
+++ [0317 23:18:48] Testing persistent volumes claims [32mstorage.sh:66: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0317 23:18:48.633571 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:48.633613 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource persistentvolumeclaim/myclaim-1 created I0317 23:18:48.731402 23174 event.go:307] "Event occurred" object="namespace-1679095128-10443/myclaim-1" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" I0317 23:18:48.774239 23174 event.go:307] "Event occurred" object="namespace-1679095128-10443/myclaim-1" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set" [32mstorage.sh:69: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-1: (B[mquery for persistentvolumeclaims had limit param query for pods had limit param ... skipping 66 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 34 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 35 lines ... 
Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 31 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 42 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 34 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 34 lines ... 
Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 30 lines ... Labels: <none> Annotations: node.alpha.kubernetes.io/ttl: 0 save-managers: true CreationTimestamp: Fri, 17 Mar 2023 23:12:43 +0000 Taints: node.kubernetes.io/unreachable:NoSchedule Unschedulable: false Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message ---- ------ ----------------- ------------------ ------ ------- Ready Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. MemoryPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. DiskPressure Unknown Fri, 17 Mar 2023 23:12:43 +0000 Fri, 17 Mar 2023 23:13:43 +0000 NodeStatusNeverUpdated Kubelet never posted node status. ... skipping 61 lines ... +++ [0317 23:18:52] exec credential plugin not triggered since kubectl was called with provided --username/--password certificatesigningrequest.certificates.k8s.io/testuser created [32mauthentication.sh:152: Successful get csr/testuser {{range.status.conditions}}{{.type}}{{end}}: (B[mcertificatesigningrequest.certificates.k8s.io/testuser approved [32mauthentication.sh:154: Successful get csr/testuser {{range.status.conditions}}{{.type}}{{end}}: Approved (B[m[32mauthentication.sh:156: Successful get csr/testuser {{.status.certificate}}: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMxekNDQWIrZ0F3SUJBZ0lSQU80d3lWSTFYeHY0bzBVczQvYnpldUV3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXd3Sk1USTNMakF1TUM0eE1CNFhEVEl6TURNeE56SXpNVE0xTTFvWERUSTBNRE14TmpJegpNVE0xTTFvd0V6RVJNQThHQTFVRUF4TUlkR1Z6ZEhWelpYSXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQzBNV1Z6RjNRQzkyZFFTekEvdEJsYTNva2ROa1NOeWQrU2JuekZOeElHbW5tLzV2U2cKMVQyYjh5eDZzMElYTHpVSzJsYVk5Y0IxMm5TOTZtM1lWKzcxWUZWblhoR3BnY3hVbFFnY3IveWttY01sNTQ1SApZQlJzNWQybS92N2NmWWpxdG11dkZ3RWVRZVZ5STNEcFdMRXUzREZDYXMxVHBBQm5EZ2dPY0xEZTFZRWpCZ281CmhObG9qbkt1T3FLekpmSVdqamJoL3dldnZ4Uk1aKzVmZGk0aWxhU2gzSjEza212bXZyaEQybm42V3FVZGpoU3MKQzhvT1RtbnpibTFSTmJwZ082dzdTQ2JUcC9FeGIzWUl2cTFUNm16aW8vTjBONFZRZ1MydVErNjZFUmlWQjFpRwpIK0o1alNwaGNtOHNZQXR3WFF5ZCtBM2Q2MXdvMDhCYWV6QlZBZ01CQUFHakpUQWpNQk1HQTFVZEpRUU1NQW9HCkNDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUp0N1l1dmwKem12M2djYXdxZkQ5cmVSMEZLYTNOTStFbWZLeXZ0Vk1xa1VHbGtFTjJvVzJOQ2JPUXlpcGJBOGM0TVIvZ2JqbwpQNjRqNm81SFZHNDVGbmNFYzQ2WXdSRVR0OW1RNjIwZnpWV0RmSmNiOUwxcjlRS0RGRTRSQXRYc3laNVJwYXkwClVENmR4N1k4SnMzU3BhYjgvMEc0NmJzVGp3STJjcllUd0kwZ0R4cWZ5UjNyaUNhRVNlWFVyNGxZczVrcUt4TTUKa3lBYjl5eVQ1bldJUmpPRTFCTjVlNnJ5MnNqenI4ZUx3YUJUM0hXRWQxTDlnQVVrUzRqWHVwTm1LWGxkRStrQgpTckFscWhNQkU0Z2o2bDVBN1gwMjFpOXVwZWozdXUzcGdYUzlRZnQvRWRuekNMOHhwNmN2cFZsOGhpSm50UUIvClVrdFJJZWFmUmVCb0d4Zz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= (B[mW0317 23:18:53.629143 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:18:53.629183 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource +++ [0317 23:18:53] exec credential plugin not triggered since kubectl was called with provided --client-certificate/--client-key User "testuser" set. +++ [0317 23:18:53] exec credential plugin not triggered since kubeconfig was configured with --client-certificate/--client-key for authentication certificatesigningrequest.certificates.k8s.io "testuser" deleted +++ [0317 23:18:53] Testing kubectl with configured client.authentication.k8s.io/v1 exec credentials plugin +++ [0317 23:18:53] exec credential plugin not triggered since kubectl was called with provided --token ... skipping 99 lines ... yes has:the server doesn't have a resource type [32mSuccessful (B[mmessage:yes has:yes [32mSuccessful (B[mmessage:error: --subresource can not be used with NonResourceURL has:subresource can not be used with NonResourceURL [32mSuccessful (B[m[32mSuccessful (B[mmessage:yes 0 has:0 ... skipping 62 lines ... 
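The authentication.sh sequence above drives the CSR approval flow (create the CSR, approve it, read the issued certificate back from .status.certificate), and the later yes/no messages come from kubectl auth can-i. A rough sketch of the same steps, assuming the csr/testuser object from the log already exists:

kubectl get csr testuser -o go-template='{{range .status.conditions}}{{.type}}{{end}}'
kubectl certificate approve testuser
# The issued certificate is stored base64-encoded on the CSR status
# (the long LS0tLS1... blob above); decode it to get the PEM:
kubectl get csr testuser -o jsonpath='{.status.certificate}' | base64 -d
# Self-service authorization checks, as exercised by the auth tests:
kubectl auth can-i get pods --subresource=status
kubectl auth can-i '*' '*'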
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]} [32mlegacy-script.sh:887: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB: (B[m[32mlegacy-script.sh:888: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R: (B[m[32mlegacy-script.sh:889: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB: (B[m[32mlegacy-script.sh:890: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR: (B[m[32mSuccessful (B[mmessage:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole has:only rbac.authorization.k8s.io/v1 is supported rolebinding.rbac.authorization.k8s.io "testing-RB" deleted role.rbac.authorization.k8s.io "testing-R" deleted Warning: deleting cluster-scoped resources, not scoped to the provided namespace clusterrole.rbac.authorization.k8s.io "testing-CR" deleted clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted ... skipping 512 lines ... namespace-1679095139-30480 default 0 13s namespace-1679095141-20083 default 0 11s some-other-random default 0 14s has:all-ns-test-2 namespace "all-ns-test-1" deleted namespace "all-ns-test-2" deleted W0317 23:19:21.800604 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:19:21.800665 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 23:19:22.686932 23174 namespace_controller.go:182] "Namespace has been deleted" namespace="all-ns-test-1" [32mget.sh:442: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mget.sh:446: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: (B[m[32mget.sh:450: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1: ... skipping 7 lines ... +++ Running case: test-cmd.run_deprecated_api_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_deprecated_api_tests +++ [0317 23:19:23] Testing deprecated APIs customresourcedefinition.apiextensions.k8s.io/deprecated.example.com created [1m[31mFAIL! (B[mmessage: has not:deprecated.example.com 516 /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../test/cmd/get.sh 1: Waiting for CR API to be available I0317 23:19:23.755864 20078 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager I0317 23:19:23.755914 20078 handler.go:165] Adding GroupVersion example.com v1beta1 to ResourceManager ... skipping 4 lines ... (B[mmessage:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind No resources found in namespace-1679095141-20083 namespace. has:example.com/v1beta1 DeprecatedKind is deprecated [32mSuccessful (B[mmessage:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind No resources found in namespace-1679095141-20083 namespace. 
error: 1 warning received has:example.com/v1beta1 DeprecatedKind is deprecated [32mSuccessful (B[mmessage:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind No resources found in namespace-1679095141-20083 namespace. error: 1 warning received has:error: 1 warning received I0317 23:19:25.032737 20078 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager I0317 23:19:25.032793 20078 handler.go:165] Adding GroupVersion example.com v1beta1 to ResourceManager customresourcedefinition.apiextensions.k8s.io "deprecated.example.com" deleted I0317 23:19:25.047187 20078 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager I0317 23:19:25.047236 20078 handler.go:165] Adding GroupVersion example.com v1beta1 to ResourceManager +++ exit code: 0 ... skipping 342 lines ... ], "kind": "List", "metadata": { "resourceVersion": "" } } W0317 23:19:31.804087 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:19:31.804135 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcertificate.sh:32: Successful get csr/foo {{range.status.conditions}}{{.type}}{{end}}: Approved (B[mquery for certificatesigningrequests had limit param query for events had limit param query for certificatesigningrequests had user-specified limit param [32mSuccessful describe certificatesigningrequests verbose logs: I0317 23:19:31.918427 58881 loader.go:373] Config loaded from file: /tmp/tmp.vT1Q3sqC26/.kube/config ... skipping 218 lines ... 
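The deprecated-API test above registers a CRD whose v1beta1 version is flagged as deprecated; requesting that version is what yields the "example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind" warning, and --warnings-as-errors turns it into "error: 1 warning received". A trimmed sketch of such a CRD (schemas and the warning text are illustrative, not the exact test fixture):

kubectl create -f - <<'EOF'
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: deprecated.example.com
spec:
  group: example.com
  scope: Namespaced
  names:
    kind: DeprecatedKind
    plural: deprecated
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema: {type: object, x-kubernetes-preserve-unknown-fields: true}
  - name: v1beta1
    served: true
    storage: false
    deprecated: true
    deprecationWarning: "example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind"
    schema:
      openAPIV3Schema: {type: object, x-kubernetes-preserve-unknown-fields: true}
EOF
# Asking for the deprecated version triggers the warning; with --warnings-as-errors
# kubectl exits non-zero ("error: 1 warning received"):
kubectl get deprecated.v1beta1.example.com --warnings-as-errors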
node/127.0.0.1 cordoned (server dry run) Warning: deleting Pods that declare no controller: namespace-1679095174-1882/test-pod-1 evicting pod namespace-1679095174-1882/test-pod-1 (server dry run) node/127.0.0.1 drained (server dry run) [32mnode-management.sh:140: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2, (B[mWarning: deleting Pods that declare no controller: namespace-1679095174-1882/test-pod-1 W0317 23:19:38.651684 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:19:38.651725 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:19:39.418183 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:19:39.418221 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:20:04.834421 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:20:04.834482 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:20:06.430437 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:20:06.430483 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:node/127.0.0.1 cordoned evicting pod namespace-1679095174-1882/test-pod-1 pod "test-pod-1" has DeletionTimestamp older than 1 seconds, skipping node/127.0.0.1 drained has:evicting pod .*/test-pod-1 ... skipping 14 lines ... (B[mmessage:node/127.0.0.1 already uncordoned (server dry run) has:already uncordoned [32mnode-management.sh:161: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[mnode/127.0.0.1 labeled [32mnode-management.sh:166: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label (B[m[32mSuccessful (B[mmessage:error: cannot specify both a node name and a --selector option See 'kubectl drain -h' for help and examples has:cannot specify both a node name [32mnode-management.sh:172: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label (B[m[32mnode-management.sh:174: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value> (B[m[32mnode-management.sh:176: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2, (B[m[32mSuccessful ... skipping 78 lines ... 
Warning: deleting Pods that declare no controller: namespace-1679095174-1882/test-pod-1, namespace-1679095174-1882/test-pod-2 evicting pod namespace-1679095174-1882/test-pod-1 (dry run) evicting pod namespace-1679095174-1882/test-pod-2 (dry run) node/127.0.0.1 drained (dry run) has:/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK [32mSuccessful (B[mmessage:error: USAGE: cordon NODE [flags] See 'kubectl cordon -h' for help and examples has:error\: USAGE\: cordon NODE node/127.0.0.1 already uncordoned [32mSuccessful (B[mmessage:error: You must provide one or more resources by argument or filename. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json' '<resource> <name>' '<resource>' has:must provide one or more resources ... skipping 18 lines ... +++ [0317 23:20:12] Testing kubectl plugins [32mSuccessful (B[mmessage:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/version/kubectl-version - warning: kubectl-version overwrites existing command: "kubectl version" error: one plugin warning was found has:kubectl-version overwrites existing command: "kubectl version" [32mSuccessful (B[mmessage:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo error: one plugin warning was found has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin [32mSuccessful (B[mmessage:The following compatible plugins are available: test/fixtures/pkg/kubectl/plugins/kubectl-foo has:plugins are available [32mSuccessful (B[mmessage:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping... error: unable to find any kubectl plugins in your PATH has:unable to find any kubectl plugins in your PATH [32mSuccessful (B[mmessage:I am plugin foo has:plugin foo [32mSuccessful (B[mmessage:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1 ... skipping 13 lines ... +++ Running case: test-cmd.run_impersonation_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_impersonation_tests +++ [0317 23:20:12] Testing impersonation [32mSuccessful (B[mmessage:error: requesting uid, groups or user-extra for test-admin without impersonating a user has:without impersonating a user [32mSuccessful (B[mmessage:error: requesting uid, groups or user-extra for test-admin without impersonating a user has:without impersonating a user certificatesigningrequest.certificates.k8s.io/foo created [32mauthorization.sh:60: Successful get csr/foo {{.spec.username}}: user1 (B[m[32mauthorization.sh:61: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated (B[mcertificatesigningrequest.certificates.k8s.io "foo" deleted certificatesigningrequest.certificates.k8s.io/foo created ... skipping 19 lines ... 
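The node-management.sh output above corresponds to the cordon/drain/uncordon cycle, including client- and server-side dry runs and the usage errors for bad flag combinations; 127.0.0.1 is the only node in this local test cluster, and --force is needed because the test pods declare no controller. The plugin checks that follow simply walk $PATH for kubectl-* executables:

kubectl cordon 127.0.0.1
kubectl drain 127.0.0.1 --force --dry-run=server   # evictions are only simulated
kubectl drain 127.0.0.1 --force                    # actually evicts the pods
kubectl uncordon 127.0.0.1
# Combining a node name with --selector is rejected, as the log shows:
kubectl drain 127.0.0.1 --selector=test=label
# Plugin discovery, including the overshadow/overwrite warnings seen above:
kubectl plugin list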
I0317 23:20:14.517581 23174 event.go:307] "Event occurred" object="namespace-1679095214-3930/test-1" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-1-7697bf65f7 to 1" I0317 23:20:14.546632 23174 event.go:307] "Event occurred" object="namespace-1679095214-3930/test-1-7697bf65f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-7697bf65f7-6hqsd" deployment.apps/test-2 created I0317 23:20:14.609693 23174 event.go:307] "Event occurred" object="namespace-1679095214-3930/test-2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-675f68f47d to 1" I0317 23:20:14.625895 23174 event.go:307] "Event occurred" object="namespace-1679095214-3930/test-2-675f68f47d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-675f68f47d-2s8x9" [32mwait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2, (B[mW0317 23:20:17.652271 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:20:17.652308 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0317 23:20:38.537494 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:20:38.537552 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:error: timed out waiting for the condition on deployments/test-1 has:timed out deployment.apps "test-1" deleted deployment.apps "test-2" deleted [32mSuccessful (B[mmessage:deployment.apps/test-1 condition met deployment.apps/test-2 condition met ... skipping 66 lines ... +++ Running case: test-cmd.run_kubectl_debug_general_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_kubectl_debug_general_tests +++ [0317 23:20:51] Creating namespace namespace-1679095251-17542 namespace/namespace-1679095251-17542 created W0317 23:20:51.749819 23174 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 23:20:51.749865 23174 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Context "test" modified. +++ [0317 23:20:51] Testing kubectl debug profile general pod/target created [32mdebug.sh:140: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target: (B[m[32mdebug.sh:144: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:target-copy: (B[m[32mdebug.sh:145: Successful get pod/target-copy {{range.spec.containers}}{{.name}}:{{end}}: target:debug-container: ... skipping 170 lines ... 
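The final blocks above exercise kubectl wait (including the timeout failure path) and kubectl debug with the "general" profile, which here copies the target pod and adds a debug container to the copy. Roughly, with the condition name and image being assumptions rather than the exact test fixture:

# Succeeds once the Deployment reports the condition; otherwise fails with
# "timed out waiting for the condition", as in the log.
kubectl wait --for=condition=Available deployment/test-1 --timeout=30s
# Debug by copying pod "target" into "target-copy" with an extra container:
kubectl debug pod/target --image=busybox --container=debug-container \
    --copy-to=target-copy --profile=general -- sleep 3600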
{"Time":"2023-03-17T23:32:22.562886274Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u001enode.kubernetes.io/unreachable\u0012\u0006Exists\u001a\u0000\"\tNoExecute(\ufffd\u0002\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0001\ufffd\u0001\u0014PreemptLowerPriority\u001a!\n"} {"Time":"2023-03-17T23:32:22.577724964Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u0004name\u0012\u0005image*\u0000B\u0000j\u0014/dev/termination-logr\u0006Always\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0004File\u001a\u0006Always \u001e2\u000cClusterFirstB\u0000J\u0000R\u0000X\u0000`\u0000h\u0000r\u0000\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0011default-scheduler\ufffd\u00016\n"} {"Time":"2023-03-17T23:32:22.577743829Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u001cnode.kubernetes.io/not-ready\u0012\u0006Exists\u001a\u0000\"\tNoExecute(\ufffd\u0002\ufffd\u00018\n"} {"Time":"2023-03-17T23:32:22.577749945Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u001enode.kubernetes.io/unreachable\u0012\u0006Exists\u001a\u0000\"\tNoExecute(\ufffd\u0002\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0001\ufffd\u0001\u0014PreemptLowerPriority\u001a!\n"} {"Time":"2023-03-17T23:32:22.592304543Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u0004name\u0012\u0005image*\u0000B\u0000j\u0014/dev/termination-logr\u0006Always\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0004File\u001a\u0006Always \u001e2\u000cClusterFirstB\u0000J\u0000R\u0000X\u0000`\u0000h\u0000r\u0000\ufffd\u0001\u0000\ufffd\u0001\u0000\ufffd\u0001\u0011default-scheduler\ufffd\u00016\n"} {"Time":"2023-03-17T23:32:22.592312608Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"TestPatch","Output":" \u001cnode.kubernetes.io/not-ready\u0012\u0006Exists\u001a\u0000\"\tNoExecute(\ufffd\u0002\ufffd\u00018\n"} {"Time":"2023-03-17T23:32:22.592318811Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/client","Test":"Tes{"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:168","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Entrypoint received interrupt: terminated","severity":"error","time":"2023-03-17T23:37:28Z"} ++ early_exit_handler ++ '[' -n 187 ']' ++ kill -TERM 187 ++ cleanup_dind ++ [[ true == \t\r\u\e ]] ++ echo 'Cleaning up after docker' ... skipping 4 lines ...