PR       | pacoxu: Disable kubelet read-only port by default (only set the correct default value)
Result   | ABORTED
Tests    | 0 failed / 140 succeeded
Started  |
Elapsed  | 28m24s
Revision | 83c4e057e437df4ad9b3fd567e9b4a54a496cfcc
Refs     | 100335
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta3
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_assert_singular_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_deletion_recreation_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_deprecated_api_tests
test-cmd run_exec_credentials_interactive_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_create_validate_tests
test-cmd run_kubectl_debug_baseline_node_tests
test-cmd run_kubectl_debug_baseline_tests
test-cmd run_kubectl_debug_general_node_tests
test-cmd run_kubectl_debug_general_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_events_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_help_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_results_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_recursive_resources_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
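
The log excerpt below opens with the harness's record_command_canary case, which deliberately runs a nonexistent command (bogus-expected-to-fail) to prove that the shell2junit wrapper records a failing shell case instead of silently passing; the call tree and "+++ exit code: 1" in the log are that proof. A minimal sketch of the recording pattern, assuming a simplified stand-in for sh2ju.sh's juLog/eVal (function names and the XML shape here are illustrative, not the real implementation):

  #!/usr/bin/env bash
  # Hypothetical simplification of the shell2junit pattern used by
  # test/cmd/legacy-script.sh: run a case, capture its exit code, and emit
  # a JUnit <testcase> record so the run can count the case as pass or fail.
  record_command() {
    local name=$1; shift
    local start=$SECONDS rc=0
    "$@" || rc=$?                      # run the case, keep its exit code
    local failure=""
    [[ $rc -ne 0 ]] && failure="<failure/>"
    printf '<testcase name="%s" time="%d">%s</testcase>\n' \
      "$name" "$(( SECONDS - start ))" "$failure"
    return $rc
  }

  # The canary must surface as a recorded failure, mirroring the log below:
  record_command record_command_canary bogus-expected-to-fail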
... skipping 49 lines ...
Recording: record_command_canary
Running command: record_command_canary

+++ Running case: test-cmd.record_command_canary 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 164: bogus-expected-to-fail: command not found
!!! [0327 10:38:59] Call tree:
!!! [0327 10:38:59]  1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0327 10:38:59]  2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0327 10:38:59]  3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:140 juLog(...)
!!! [0327 10:38:59]  4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:168 record_command(...)
!!! [0327 10:38:59]  5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0327 10:38:59] Running kubeadm tests
go version go1.20.2 linux/amd64
+++ [0327 10:39:03] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kubeadm (static)
go version go1.20.2 linux/amd64
+++ [0327 10:39:56] Running tests without code coverage
... skipping 225 lines ...
I0327 10:42:12.632516 19971 controller.go:83] Starting OpenAPI AggregationController
I0327 10:42:12.632572 19971 crd_finalizer.go:266] Starting CRDFinalizer
I0327 10:42:12.632616 19971 controller.go:80] Starting OpenAPI V3 AggregationController
I0327 10:42:12.632559 19971 apiapproval_controller.go:186] Starting KubernetesAPIApprovalPolicyConformantConditionController
I0327 10:42:12.635362 19971 apiservice_controller.go:97] Starting APIServiceRegistrationController
I0327 10:42:12.635377 19971 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller
E0327 10:42:12.718476 19971 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I0327 10:42:12.732559 19971 shared_informer.go:318] Caches are synced for configmaps
I0327 10:42:12.732617 19971 cache.go:39] Caches are synced for autoregister controller
I0327 10:42:12.732642 19971 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I0327 10:42:12.732718 19971 cache.go:39] Caches are synced for AvailableConditionController controller
I0327 10:42:12.732763 19971 apf_controller.go:366] Running API Priority and Fairness config worker
I0327 10:42:12.732786 19971 apf_controller.go:369] Running API Priority and Fairness periodic rebalancing process
... skipping 16 lines ...
go version go1.20.2 linux/amd64
+++ [0327 10:42:14] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kube-controller-manager (static)
+++ [0327 10:42:49] Generate kubeconfig for controller-manager
+++ [0327 10:42:49] Starting controller-manager
I0327 10:42:50.027610 23048 serving.go:348] Generated self-signed cert in-memory
W0327 10:42:50.271361 23048 authentication.go:446] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0327 10:42:50.271396 23048 authentication.go:339] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0327 10:42:50.271413 23048 authentication.go:363] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0327 10:42:50.271465 23048 authorization.go:225] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0327 10:42:50.271480 23048 authorization.go:193] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0327 10:42:50.271927 23048 controllermanager.go:187] "Starting" version="v1.28.0-alpha.0.13+cd70d8fd63b369"
I0327 10:42:50.271962 23048 controllermanager.go:189] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0327 10:42:50.273875 23048 secure_serving.go:210] Serving securely on [::]:10257
I0327 10:42:50.273963 23048 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0327 10:42:50.274286 23048 leaderelection.go:245] attempting to acquire leader lease kube-system/kube-controller-manager...
+++ [0327 10:42:50] On try 2, controller-manager: ok
I0327 10:42:50.287132 23048 leaderelection.go:255] successfully acquired lease kube-system/kube-controller-manager
I0327 10:42:50.287241 23048 event.go:307] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="2794cd86-cc8b-11ed-a320-fef29d8dcfe4_0d1d7f68-7ee2-4695-878b-ebad60600ce3 became leader"
I0327 10:42:50.293249 23048 controllermanager.go:661] "Controller is disabled because there is no private key" controller="serviceaccount-token"
W0327 10:42:50.293541 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
W0327 10:42:50.293651 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
E0327 10:42:50.293737 23048 core.go:92] "Failed to start service controller" err="WARNING: no cloud provider provided, services of type LoadBalancer will fail"
I0327 10:42:50.293762 23048 controllermanager.go:616] "Warning: skipping controller" controller="service"
W0327 10:42:50.293990 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0327 10:42:50.294102 23048 controllermanager.go:638] "Started controller" controller="clusterrole-aggregation"
I0327 10:42:50.294214 23048 clusterroleaggregation_controller.go:189] "Starting ClusterRoleAggregator controller"
I0327 10:42:50.294244 23048 shared_informer.go:311] Waiting for caches to sync for ClusterRoleAggregator
W0327 10:42:50.294337 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
... skipping 95 lines ...
I0327 10:42:50.304647 23048 shared_informer.go:311] Waiting for caches to sync for PVC protection
W0327 10:42:50.304634 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0327 10:42:50.304701 23048 controllermanager.go:638] "Started controller" controller="root-ca-cert-publisher"
I0327 10:42:50.305000 23048 controllermanager.go:638] "Started controller" controller="ephemeral-volume"
W0327 10:42:50.305192 23048 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0327 10:42:50.305267 23048 controllermanager.go:638] "Started controller" controller="statefulset"
E0327 10:42:50.305460 23048 core.go:213] "Failed to start cloud node lifecycle controller" err="no cloud provider provided"
I0327 10:42:50.305486 23048 controllermanager.go:616] "Warning: skipping controller" controller="cloud-node-lifecycle"
I0327 10:42:50.305813 23048 controllermanager.go:638] "Started controller" controller="ttl-after-finished"
I0327 10:42:50.306058 23048 publisher.go:101] Starting root CA certificate configmap publisher
I0327 10:42:50.306077 23048 shared_informer.go:311] Waiting for caches to sync for crt configmap
I0327 10:42:50.306115 23048 controller.go:169] "Starting ephemeral volume controller"
I0327 10:42:50.306122 23048 shared_informer.go:311] Waiting for caches to sync for ephemeral
... skipping 99 lines ...
I0327 10:42:50.702499 23048 shared_informer.go:318] Caches are synced for endpoint_slice_mirroring
I0327 10:42:50.702614 23048 shared_informer.go:318] Caches are synced for endpoint_slice
I0327 10:42:50.716197 23048 shared_informer.go:318] Caches are synced for resource quota
I0327 10:42:50.717283 23048 shared_informer.go:318] Caches are synced for endpoint
I0327 10:42:50.719584 23048 shared_informer.go:318] Caches are synced for resource quota
node/127.0.0.1 created
I0327 10:42:50.863527 23048 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"127.0.0.1\" does not exist"
+++ [0327 10:42:50] Checking kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"28+", GitVersion:"v1.28.0-alpha.0.13+cd70d8fd63b369", GitCommit:"cd70d8fd63b3698aa26011c8822e4df802947054", GitTreeState:"clean", BuildDate:"2023-03-25T01:59:54Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v5.0.1
Server Version: version.Info{Major:"1", Minor:"28+", GitVersion:"v1.28.0-alpha.0.13+cd70d8fd63b369", GitCommit:"cd70d8fd63b3698aa26011c8822e4df802947054", GitTreeState:"clean", BuildDate:"2023-03-25T01:59:54Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"}
I0327 10:42:51.117016 23048 shared_informer.go:318] Caches are synced for garbage collector
I0327 10:42:51.117053 23048 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I0327 10:42:51.137223 23048 shared_informer.go:318] Caches are synced for garbage collector
The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocate IP 10.0.0.1: provided IP is already allocated
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   37s
Recording: run_kubectl_version_tests
Running command: run_kubectl_version_tests

+++ Running case: test-cmd.run_kubectl_version_tests 
... skipping 196 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_RESTMapper_evaluation_tests
+++ [0327 10:42:55] Creating namespace namespace-1679913775-11640
namespace/namespace-1679913775-11640 created
Context "test" modified.
+++ [0327 10:42:55] Testing RESTMapper
+++ [0327 10:42:55] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
+++ exit code: 0
NAME                SHORTNAMES   APIVERSION   NAMESPACED   KIND
bindings                         v1           true         Binding
componentstatuses   cs           v1           false        ComponentStatus
configmaps          cm           v1           true         ConfigMap
endpoints           ep           v1           true         Endpoints
... skipping 60 lines ...
namespace/namespace-1679913777-24246 created
Context "test" modified.
+++ [0327 10:42:57] Testing clusterroles
rbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin
rbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run)
clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created
rbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "pod-admin" deleted
... skipping 18 lines ...
clusterrole.rbac.authorization.k8s.io/url-reader created
rbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get:
rbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*:
clusterrole.rbac.authorization.k8s.io/aggregation-reader created
rbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created
rbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run)
rbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
... skipping 64 lines ...
rbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user:
rbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user:
rbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user:
rolebinding.rbac.authorization.k8s.io/admin created (dry run)
rolebinding.rbac.authorization.k8s.io/admin created (server dry run)
Successful
message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found
has: not found
rolebinding.rbac.authorization.k8s.io/admin created
rbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole
rbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:
rolebinding.rbac.authorization.k8s.io/admin subjects updated
rbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo:
... skipping 152 lines ...
namespace/namespace-1679913783-3905 created
Context "test" modified.
+++ [0327 10:43:03] Testing role
role.rbac.authorization.k8s.io/pod-admin created (dry run)
role.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found
has: not found
role.rbac.authorization.k8s.io/pod-admin created
rbac.sh:159: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
rbac.sh:160: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods:
rbac.sh:161: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: :
Successful
... skipping 623 lines ...
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
core.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: resource(s) were provided, but no name was specified
core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: setting 'all' parameter but found a non empty selector.
core.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:210: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:214: Successful get pods -lname=valid-pod {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:219: Successful get namespaces {{range.items}}{{ if eq .metadata.name "test-kubectl-describe-pod" }}found{{end}}{{end}}:: :
... skipping 30 lines ...
I0327 10:43:19.793101 28209 round_trippers.go:553] GET https://127.0.0.1:6443/apis/policy/v1/namespaces/test-kubectl-describe-pod/poddisruptionbudgets/test-pdb-2 200 OK in 1 milliseconds
I0327 10:43:19.794815 28209 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-kubectl-describe-pod/events?fieldSelector=involvedObject.kind%3DPodDisruptionBudget%2CinvolvedObject.uid%3D1082892b-a6fa-4381-8846-1a95e6c2a0a4%2CinvolvedObject.name%3Dtest-pdb-2%2CinvolvedObject.namespace%3Dtest-kubectl-describe-pod&limit=500 200 OK in 1 milliseconds
poddisruptionbudget.policy/test-pdb-3 created
core.sh:271: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
poddisruptionbudget.policy/test-pdb-4 created
core.sh:275: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
error: min-available and max-unavailable cannot be both specified
core.sh:281: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}:
pod/env-test-pod created
matched TEST_CMD_1
matched <set to the key 'key-1' in secret 'test-secret'>
matched TEST_CMD_2
matched <set to the key 'key-2' of config map 'test-configmap'>
... skipping 242 lines ...
core.sh:542: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:3.9:
Successful
message:kubectl-create kubectl-patch
has:kubectl-patch
pod/valid-pod patched
core.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
+++ [0327 10:43:34] "kubectl patch with resourceVersion 613" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
pod "valid-pod" deleted
pod/valid-pod replaced
core.sh:586: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
Successful
message:kubectl-replace
has:kubectl-replace
Successful
message:error: --grace-period must have --force specified
has:\-\-grace-period must have \-\-force specified
Successful
message:error: --timeout must have --force specified
has:\-\-timeout must have \-\-force specified
node/node-v1-test created
I0327 10:43:35.059973 23048 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"node-v1-test\" does not exist"
core.sh:614: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced (server dry run)
node/node-v1-test replaced (dry run)
core.sh:639: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
I0327 10:43:35.606697 23048 event.go:307] "Event occurred" object="node-v1-test" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node node-v1-test event: Registered Node node-v1-test in Controller"
node/node-v1-test replaced
... skipping 30 lines ...
spec:
  containers:
  - image: registry.k8s.io/pause:3.9
    name: kubernetes-pause
has:localonlyvalue
core.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
error: 'name' already has a value (valid-pod), and --overwrite is false
core.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
core.sh:699: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
pod/valid-pod labeled
core.sh:703: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
core.sh:707: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
... skipping 85 lines ...

+++ Running case: test-cmd.run_kubectl_create_error_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_create_error_tests
+++ [0327 10:43:43] Creating namespace namespace-1679913823-10790
namespace/namespace-1679913823-10790 created
Context "test" modified.
+++ [0327 10:43:43] Testing kubectl create with error
Error: must specify one of -f and -k

Create a resource from a file or from stdin.

 JSON and YAML formats are accepted.

Examples:
... skipping 63 lines ...
        If true, keep the managedFields when printing objects in JSON or YAML format.

    --template='':
        Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].

    --validate='strict':
        Must be one of: strict (or true), warn, ignore (or false). "true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not. "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise. "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.

    --windows-line-endings=false:
        Only relevant if --edit=true. Defaults to the line ending native to your platform.

Usage:
  kubectl create -f FILENAME [options]
... skipping 38 lines ...
I0327 10:43:46.299219 23048 event.go:307] "Event occurred" object="namespace-1679913824-27384/test-deployment-retainkeys-d65c44c97" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-d65c44c97-g4z7c"
deployment.apps "test-deployment-retainkeys" deleted
apply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
apply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
apply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/test-pod created (dry run)
pod/test-pod created (server dry run)
apply.sh:107: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
... skipping 21 lines ...
I0327 10:43:51.001267 19971 handler.go:232] Adding GroupVersion mygroup.example.com v1alpha1 to ResourceManager
namespace/nsb created
apply.sh:181: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/a created
apply.sh:184: Successful get pods a -n nsb {{.metadata.name}}: a
W0327 10:43:52.008236 19971 cacher.go:171] Terminating all watchers from cacher resources.mygroup.example.com
E0327 10:43:52.009542 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
pod/b created
W0327 10:43:52.449984 32136 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
pod/a pruned
W0327 10:43:53.422800 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:43:53.422844 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apply.sh:188: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b:
pod "b" deleted
apply.sh:195: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/a created
apply.sh:200: Successful get pods a {{.metadata.name}}: a
apply.sh:202: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}:
pod/b created
apply.sh:207: Successful get pods a {{.metadata.name}}: a
apply.sh:208: Successful get pods b -n nsb {{.metadata.name}}: b
pod "a" deleted
pod "b" deleted
Successful
message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector
has:all resources selected for prune without explicitly passing --all
pod/a created
pod/b created
I0327 10:43:55.264262 19971 alloc.go:330] "allocated clusterIPs" service="namespace-1679913824-27384/prune-svc" clusterIPs=map[IPv4:10.0.0.241]
service/prune-svc created
W0327 10:43:55.264858 32304 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
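
The repeated prune deprecation warning above recommends pinning the prune scope explicitly instead of relying on the default non-namespaced sweep; a minimal sketch of that usage (the manifest path, label selector, and allowlisted kinds here are illustrative, not taken from the test):

  # Prune only explicitly allowlisted kinds matching the selector, as the
  # warning suggests; --prune-allowlist takes <group/version/kind> entries.
  kubectl apply -f manifests/ \
    --prune -l app=example \
    --prune-allowlist=core/v1/Pod \
    --prune-allowlist=core/v1/Service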
W0327 10:43:55.982589 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:43:55.982631 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0327 10:43:58.412570 23048 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679913821-7671/frontend"
apply.sh:220: Successful get pods a {{.metadata.name}}: a
apply.sh:221: Successful get pods b -n nsb {{.metadata.name}}: b
pod "a" deleted
pod "b" deleted
namespace "nsb" deleted
W0327 10:44:01.322917 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:44:01.322956 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
persistentvolumeclaim/a-pvc created
W0327 10:44:04.862859 32378 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
I0327 10:44:04.863457 23048 event.go:307] "Event occurred" object="namespace-1679913824-27384/a-pvc" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
I0327 10:44:04.866708 23048 event.go:307] "Event occurred" object="namespace-1679913824-27384/a-pvc" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
service/prune-svc pruned
I0327 10:44:05.407089 23048 event.go:307] "Event occurred" object="namespace-1679913824-27384/a-pvc" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
... skipping 30 lines ...
apply.sh:258: Successful get pods a -n nsb {{.metadata.name}}: a
pod/b created
apply.sh:261: Successful get pods b -n nsb {{.metadata.name}}: b
pod/b unchanged
W0327 10:44:12.755172 32675 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
pod/a pruned
W0327 10:44:12.781021 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:44:12.781055 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apply.sh:265: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b:
namespace "nsb" deleted
Successful
message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation.
has:the namespace from the provided object "nsb" does not match the namespace "foo".
apply.sh:276: Successful get services {{range.items}}{{.metadata.name}}:{{end}}:
service/a created
apply.sh:280: Successful get services a {{.metadata.name}}: a
Successful
message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set
... skipping 30 lines ...
apply.sh:302: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment
apply.sh:303: Successful get service test-the-service {{.metadata.name}}: test-the-service
configmap "test-the-map" deleted
service "test-the-service" deleted
deployment.apps "test-the-deployment" deleted
Successful
message:Error from server (NotFound): namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
apply.sh:311: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:namespace/multi-resource-ns created
Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
Successful
message:Error from server (NotFound): pods "test-pod" not found
has:pods "test-pod" not found
pod/test-pod created
namespace/multi-resource-ns unchanged
apply.sh:319: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod
pod "test-pod" deleted
namespace "multi-resource-ns" deleted
I0327 10:44:24.358654 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="nsb"
apply.sh:325: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:configmap/foo created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Bogus" in version "example.com/v1"
apply.sh:331: Successful get configmaps foo {{.metadata.name}}: foo
configmap "foo" deleted
apply.sh:337: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
... skipping 6 lines ...
pod "pod-a" deleted
pod "pod-c" deleted
apply.sh:345: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apply.sh:349: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:customresourcedefinition.apiextensions.k8s.io/widgets.example.com created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Widget" in version "example.com/v1"
I0327 10:44:28.816777 19971 handler.go:232] Adding GroupVersion example.com v1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io/widgets.example.com condition met
Successful
message:Error from server (NotFound): widgets.example.com "foo" not found
has:widgets.example.com "foo" not found
apply.sh:356: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com
I0327 10:44:31.318764 19971 controller.go:624] quota admission added evaluator for: widgets.example.com
widget.example.com/foo created
customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged
apply.sh:359: Successful get widget foo {{.metadata.name}}: foo
... skipping 34 lines ...
message:891
has:891
pod "test-pod" deleted
apply.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
+++ [0327 10:44:33] Testing upgrade kubectl client-side apply to server-side apply
pod/test-pod created
error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name
Please review the fields above--they currently have other managers. Here
are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply
  command with the `--force-conflicts` flag.
* If you do not intend to manage all of the fields, please edit your
  manifest to remove references to the fields that should keep their
... skipping 115 lines ...
has:configmap/ssa-test created
apply.sh:559: Successful get configmap ssa-test {{ .data.key }}: value1
Successful
message:configmap/ssa-test serverside-applied
has:configmap/ssa-test serverside-applied
apply.sh:577: Successful get configmap ssa-test {{ .data.key }}: value1
W0327 10:44:37.403070 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:44:37.403123 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:configmap/ssa-test serverside-applied
has:configmap/ssa-test serverside-applied
apply.sh:594: Successful get configmap ssa-test {{ .data.key }}: value2
apply.sh:595: Successful get configmap ssa-test {{ .data.legacy }}: <no value>
configmap "ssa-test" deleted
... skipping 26 lines ...
pod "nginx-extensions" deleted
Successful
message:pod/test1 created
has:pod/test1 created
pod "test1" deleted
Successful
message:error: Invalid image name "InvalidImageName": invalid reference format
has:error: Invalid image name "InvalidImageName": invalid reference format
+++ exit code: 0
Recording: run_kubectl_create_filter_tests
Running command: run_kubectl_create_filter_tests

+++ Running case: test-cmd.run_kubectl_create_filter_tests 
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 3 lines ...
Context "test" modified.
+++ [0327 10:44:39] Testing kubectl create filter
create.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
create.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_apply_deployments_tests
Running command: run_kubectl_apply_deployments_tests
... skipping 18 lines ...
apps.sh:165: Successful get deployments my-depl {{.spec.template.metadata.labels.l1}}: l1
apps.sh:166: Successful get deployments my-depl {{.spec.selector.matchLabels.l1}}: l1
apps.sh:167: Successful get deployments my-depl {{.metadata.labels.l1}}: <no value>
deployment.apps "my-depl" deleted
replicaset.apps "my-depl-bfb57d6df" deleted
pod "my-depl-bfb57d6df-d8krw" deleted
E0327 10:44:41.120056 23048 replica_set.go:544] sync "namespace-1679913879-4715/my-depl-bfb57d6df" failed with Operation cannot be fulfilled on replicasets.apps "my-depl-bfb57d6df": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679913879-4715/my-depl-bfb57d6df, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: ce2404c5-5486-47c9-9eb0-12df8d50a80f, UID in object meta:
apps.sh:173: Successful get deployments {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:174: Successful get replicasets {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:175: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:179: Successful get deployments {{range.items}}{{.metadata.name}}:{{end}}:
deployment.apps/nginx created
I0327 10:44:41.651839 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5645b79496 to 3"
I0327 10:44:41.655021 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-jcvft"
I0327 10:44:41.658777 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-4m6ql"
I0327 10:44:41.658955 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-v2c8r"
apps.sh:183: Successful get deployment nginx {{.metadata.name}}: nginx
Successful
message:Error from server (Conflict): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1679913879-4715\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"registry.k8s.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
to:
Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
Name: "nginx", Namespace: "namespace-1679913879-4715"
for: "hack/testdata/deployment-label-change2.yaml": error when patching "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
has:Error from server (Conflict)
deployment.apps/nginx configured
I0327 10:44:50.152625 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5675dfc785 to 3"
I0327 10:44:50.157373 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-dpgdj"
I0327 10:44:50.160091 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-l2qng"
I0327 10:44:50.161063 23048 event.go:307] "Event occurred" object="namespace-1679913879-4715/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-j6jrx"
Successful
... skipping 477 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_diff_same_names
+++ [0327 10:45:14] Creating namespace namespace-1679913914-28473
namespace/namespace-1679913914-28473 created
Context "test" modified.
+++ [0327 10:45:14] Test kubectl diff with multiple resources with the same name
W0327 10:45:14.131431 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:45:14.131472 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:/tmp/LIVE-3245444577
/tmp/LIVE-3245444577/v1.Secret.namespace-1679913914-28473.test
/tmp/LIVE-3245444577/apps.v1.Deployment.namespace-1679913914-28473.test
/tmp/LIVE-3245444577/v1.ConfigMap.namespace-1679913914-28473.test
/tmp/LIVE-3245444577/v1.Pod.namespace-1679913914-28473.test
... skipping 49 lines ...
+++ [0327 10:45:14] Creating namespace namespace-1679913914-30399
namespace/namespace-1679913914-30399 created
Context "test" modified.
+++ [0327 10:45:14] Testing kubectl get
get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:{
    "apiVersion": "v1",
    "items": [],
... skipping 21 lines ...
has not:No resources found
Successful
message:NAME
has not:No resources found
get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:error: the server doesn't have a resource type "foobar"
has not:No resources found
Successful
message:No resources found in namespace-1679913914-30399 namespace.
has:No resources found
Successful
message:
has not:No resources found
Successful
message:No resources found in namespace-1679913914-30399 namespace.
has:No resources found
get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:Error from server (NotFound): pods "abc" not found
has not:List
Successful
message:I0327 10:45:15.617635 35935 loader.go:373] Config loaded from file: /tmp/tmp.Yu0BLSZ4TL/.kube/config
I0327 10:45:15.623828 35935 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 5 milliseconds
I0327 10:45:15.638820 35935 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 1 milliseconds
I0327 10:45:15.640334 35935 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds
... skipping 597 lines ...
}
get.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
<no value>Successful
message:valid-pod:
has:valid-pod:
Successful
message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found.
Printing more information for debugging the template:
	template was:
		{.missing}
	object given to jsonpath engine was:
		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2023-03-27T10:45:23Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2023-03-27T10:45:23Z"}}, "name":"valid-pod", "namespace":"namespace-1679913922-14908", "resourceVersion":"1122", "uid":"76ae277b-b001-4e57-9ef6-16ae7e78da5e"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"registry.k8s.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
has:missing is not found
error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
Successful
message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing".
Printing more information for debugging the template:
	template was:
		{{.missing}}
	raw data was:
		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2023-03-27T10:45:23Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2023-03-27T10:45:23Z"}],"name":"valid-pod","namespace":"namespace-1679913922-14908","resourceVersion":"1122","uid":"76ae277b-b001-4e57-9ef6-16ae7e78da5e"},"spec":{"containers":[{"image":"registry.k8s.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
	object given to template engine was:
		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2023-03-27T10:45:23Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2023-03-27T10:45:23Z]] name:valid-pod namespace:namespace-1679913922-14908 resourceVersion:1122 uid:76ae277b-b001-4e57-9ef6-16ae7e78da5e] spec:map[containers:[map[image:registry.k8s.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
has:map has no entry for key "missing"
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
Successful
message:Error from server (NotFound): the server could not find the requested resource
has:the server could not find the requested resource
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:STATUS
Successful
... skipping 78 lines ...
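
The pair of template failures above shows kubectl's two client-side output engines hitting the same missing key: jsonpath reports "missing is not found" and dumps the object, while go-template reports map has no entry for key "missing" and dumps the raw data. A minimal repro, assuming a pod named valid-pod exists in the current namespace:

  # jsonpath engine: fails with "missing is not found"
  kubectl get pod valid-pod -o jsonpath='{.missing}'
  # go-template engine: fails with 'map has no entry for key "missing"'
  kubectl get pod valid-pod -o go-template='{{.missing}}'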
terminationGracePeriodSeconds: 30
status:
  phase: Pending
  qosClass: Guaranteed
has:name: valid-pod
Successful
message:Error from server (NotFound): pods "invalid-pod" not found
has:"invalid-pod" not found
pod "valid-pod" deleted
get.sh:204: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/redis-master created
pod/valid-pod created
Successful
... skipping 1136 lines ...
+++ [0327 10:45:36] Creating namespace namespace-1679913936-4114
namespace/namespace-1679913936-4114 created
Context "test" modified.
+++ [0327 10:45:36] Testing kubectl exec POD COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:error: cannot exec into multiple objects at a time
has:cannot exec into multiple objects at a time
pod/test-pod created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pods "test-pod" not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod or type/name must be specified
pod "test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_exec_resource_name_tests
Running command: run_kubectl_exec_resource_name_tests
... skipping 3 lines ...
+++ [0327 10:45:37] Creating namespace namespace-1679913937-10218
namespace/namespace-1679913937-10218 created
Context "test" modified.
+++ [0327 10:45:37] Testing kubectl exec TYPE/NAME COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: the server doesn't have a resource type "foo"
has:error:
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): deployments.apps "bar" not found
has:"bar" not found
pod/test-pod created
replicaset.apps/frontend created
I0327 10:45:37.749068 23048 event.go:307] "Event occurred" object="namespace-1679913937-10218/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-ck2gn"
I0327 10:45:37.752221 23048 event.go:307] "Event occurred" object="namespace-1679913937-10218/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-48jcr"
I0327 10:45:37.752794 23048 event.go:307] "Event occurred" object="namespace-1679913937-10218/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-wc2xn"
configmap/test-set-env-config created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
has:not implemented
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod, type/name or --filename must be specified
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-48jcr does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-48jcr does not have a host assigned
has not:pod, type/name or --filename must be specified
pod "test-pod" deleted
replicaset.apps "frontend" deleted
configmap "test-set-env-config" deleted
+++ exit code: 0
Recording: run_create_secret_tests
Running command: run_create_secret_tests
+++ Running case: test-cmd.run_create_secret_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_create_secret_tests
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
message:user-specified
has:user-specified
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"a57fa360-2efa-4ec5-9280-ec6a79dd26e7","resourceVersion":"1221","creationTimestamp":"2023-03-27T10:45:38Z"}}
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"a57fa360-2efa-4ec5-9280-ec6a79dd26e7","resourceVersion":"1223","creationTimestamp":"2023-03-27T10:45:38Z"},"data":{"key1":"config1"}}
has:uid
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"a57fa360-2efa-4ec5-9280-ec6a79dd26e7","resourceVersion":"1223","creationTimestamp":"2023-03-27T10:45:38Z"},"data":{"key1":"config1"}}
has:config1
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"a57fa360-2efa-4ec5-9280-ec6a79dd26e7"}}
Successful
message:Error from server (NotFound): configmaps "tester-update-cm" not found
has:configmaps "tester-update-cm" not found
+++ exit code: 0
Recording: run_kubectl_create_kustomization_directory_tests
Running command: run_kubectl_create_kustomization_directory_tests
+++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests
... skipping 25 lines ...
+++ command: run_kubectl_create_validate_tests
+++ [0327 10:45:39] Creating namespace namespace-1679913939-15014
namespace/namespace-1679913939-15014 created
Context "test" modified.
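The exec cases above exercise argument parsing rather than a live container; a minimal sketch of the forms involved (pod, resource, and command names follow the tests, the remote command is illustrative):

  # deprecated form: the remote command is a bare positional argument
  kubectl exec test-pod date

  # recommended form: "--" separates the remote command from kubectl's own flags
  kubectl exec test-pod -- date

  # TYPE/NAME resolution picks a pod backing the named resource
  kubectl exec replicaset/frontend -- date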
+++ [0327 10:45:39] Testing kubectl create --validate
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0327 10:45:40] Testing kubectl create --validate=true
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0327 10:45:40] Testing kubectl create --validate=false
I0327 10:45:40.432784 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4"
Successful
message:deployment.apps/invalid-nginx-deployment created
has:deployment.apps/invalid-nginx-deployment created
I0327 10:45:40.440878 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-kczjh"
I0327 10:45:40.444479 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-bfbtp"
I0327 10:45:40.451895 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-wpnqg"
I0327 10:45:40.454950 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-6dwqs"
deployment.apps "invalid-nginx-deployment" deleted
+++ [0327 10:45:40] Testing kubectl create --validate=strict
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0327 10:45:40] Testing kubectl create --validate=warn
Warning: unknown field "spec.baz"
Warning: unknown field "spec.foo"
I0327 10:45:40.907673 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4"
Successful
message:deployment.apps/invalid-nginx-deployment created
... skipping 12 lines ...
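The validate modes above differ only in how unknown fields are treated, which the log demonstrates against a deliberately broken fixture; a minimal sketch of the same calls (the manifest path is the test fixture):

  kubectl create -f hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml --validate=strict   # rejected: strict decoding error
  kubectl create -f hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml --validate=warn     # created, one Warning per unknown field
  kubectl create -f hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml --validate=ignore   # created, unknown fields dropped silently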
I0327 10:45:41.049546 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-7vrt5"
I0327 10:45:41.052611 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-dp76m"
I0327 10:45:41.056359 23048 event.go:307] "Event occurred" object="namespace-1679913939-15014/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-szfmh"
deployment.apps "invalid-nginx-deployment" deleted
+++ [0327 10:45:41] Testing kubectl create
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0327 10:45:41] Testing kubectl create --validate=foo
Successful
message:error: invalid - validate option "foo"; must be one of: strict (or true), warn, ignore (or false)
has:invalid - validate option "foo"
+++ exit code: 0
Recording: run_convert_tests
Running command: run_convert_tests
+++ Running case: test-cmd.run_convert_tests
... skipping 51 lines ...
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
has:apps/v1beta1
deployment.apps "nginx" deleted
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
Successful
message:nginx:
has:nginx:
+++ exit code: 0
Recording: run_kubectl_delete_allnamespaces_tests
... skipping 103 lines ...
has:Timeout
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
Successful
message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
has:Invalid timeout value
pod "valid-pod" deleted
+++ exit code: 0
Recording: run_crd_tests
Running command: run_crd_tests
... skipping 167 lines ...
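The "Object 'Kind' is missing" failures above (and throughout the recursive tests below) all trace to a fixture whose kind: key is mistyped as ind:, which makes the object undecodable before any validation runs; a minimal sketch of reproducing the error (the file name is illustrative):

  # a manifest without a "kind" key cannot be decoded at all
  printf 'apiVersion: v1\nind: Pod\nmetadata:\n  name: busybox2\n' > broken.yaml
  kubectl create -f broken.yaml
  # error: unable to decode "broken.yaml": Object 'Kind' is missing in ...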
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:296: Successful get foos/test {{.patched}}: value2
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:298: Successful get foos/test {{.patched}}: <no value>
+++ [0327 10:45:51] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge
{
    "apiVersion": "company.com/v1",
    "kind": "Foo",
    "metadata": {
        "annotations": {
            "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 216 lines ...
crd.sh:519: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}:
namespace/non-native-resources created
bar.company.com/test created
crd.sh:524: Successful get bars {{len .items}}: 1
namespace "non-native-resources" deleted
crd.sh:527: Successful get bars {{len .items}}: 0
Error from server (NotFound): namespaces "non-native-resources" not found
customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
I0327 10:46:04.145149 19971 handler.go:232] Adding GroupVersion company.com v1 to ResourceManager
I0327 10:46:04.146692 19971 handler.go:232] Adding GroupVersion company.com v1 to ResourceManager
I0327 10:46:04.156450 19971 handler.go:232] Adding GroupVersion company.com v1 to ResourceManager
I0327 10:46:04.302863 19971 handler.go:232] Adding GroupVersion company.com v1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
... skipping 15 lines ...
+++ [0327 10:46:04] Testing recursive resources
+++ [0327 10:46:04] Creating namespace namespace-1679913964-20606
namespace/namespace-1679913964-20606 created
Context "test" modified.
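The CustomResource patch calls above avoid the strategic-merge path because custom resources carry no patch-strategy metadata; a minimal sketch of the working form (resource and field names are the test's):

  # strategic merge patch is not supported for company.com/v1, Kind=Foo; a JSON merge patch works
  kubectl patch foos/test --type=merge -p '{"patched":"value2"}'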
generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0327 10:46:05.157090 19971 cacher.go:171] Terminating all watchers from cacher foos.company.com
E0327 10:46:05.158402 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:46:05.311923 19971 cacher.go:171] Terminating all watchers from cacher bars.company.com
E0327 10:46:05.313312 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:46:05.473811 19971 cacher.go:171] Terminating all watchers from cacher resources.mygroup.example.com
E0327 10:46:05.475157 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0327 10:46:05.638817 19971 cacher.go:171] Terminating all watchers from cacher validfoos.company.com
Successful
message:pod/busybox0 created
pod/busybox1 created
error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
E0327 10:46:05.640022 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:pod/busybox0 replaced
pod/busybox1 replaced
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:Name:         busybox0
Namespace:    namespace-1679913964-20606
Priority:     0
Node:         <none>
... skipping 154 lines ...
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     <none>
Events:          <none>
unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
W0327 10:46:06.357712 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:06.357754 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0327 10:46:06.431376 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:06.431418 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
Successful
message:pod/busybox0 annotate
pod/busybox1 annotate
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox0 configured
Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox1 configured
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0327 10:46:07.165130 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:07.165181 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
generic-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
Successful
message:pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
generic-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
Successful
message:pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "busybox0" force deleted
pod "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
replicationcontroller/busybox0 created
I0327 10:46:07.917991 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-4mjl5"
replicationcontroller/busybox1 created
error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0327 10:46:07.958497 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-8q97x"
generic-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1
generic-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
generic-resources.sh:319: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
Successful
message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
horizontalpodautoscaler.autoscaling/busybox1 autoscaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
horizontalpodautoscaler.autoscaling "busybox0" deleted
horizontalpodautoscaler.autoscaling "busybox1" deleted
generic-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1
W0327 10:46:08.706037 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:08.706077 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1
I0327 10:46:08.777870 19971 alloc.go:330] "allocated clusterIPs" service="namespace-1679913964-20606/busybox0" clusterIPs=map[IPv4:10.0.0.102]
I0327 10:46:08.782937 19971 alloc.go:330] "allocated clusterIPs" service="namespace-1679913964-20606/busybox1" clusterIPs=map[IPv4:10.0.0.37]
generic-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
generic-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
Successful
message:service/busybox0 exposed
service/busybox1 exposed
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
I0327 10:46:08.954297 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="non-native-resources"
generic-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1
W0327 10:46:09.109025 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:09.109060 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0327 10:46:09.167922 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-ct2tb"
I0327 10:46:09.173498 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-m7vcv"
generic-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2
generic-resources.sh:347: Successful get rc busybox1 {{.spec.replicas}}: 2
Successful
message:replicationcontroller/busybox0 scaled
replicationcontroller/busybox1 scaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0327 10:46:09.445579 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:09.445613 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
deployment.apps/nginx1-deployment created
I0327 10:46:09.742149 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx1-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-69c599568 to 2"
I0327 10:46:09.746606 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-5rr5j"
I0327 10:46:09.749448 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-l5bcg"
deployment.apps/nginx0-deployment created
error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0327 10:46:09.763234 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx0-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-5944978c6f to 2"
I0327 10:46:09.766339 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx0-deployment-5944978c6f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-sm4g6"
I0327 10:46:09.770112 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/nginx0-deployment-5944978c6f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-s6qxp"
generic-resources.sh:365: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
generic-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
generic-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
Successful
message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
deployment.apps/nginx1-deployment paused
deployment.apps/nginx0-deployment paused
generic-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
... skipping 41 lines ...
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx0-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx1-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>
deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' W0327 10:46:13.837336 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0327 10:46:13.837373 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0327 10:46:14.553882 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0327 10:46:14.553919 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:411: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0327 10:46:14.860461 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-9h8mc" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0327 10:46:14.895403 23048 event.go:307] "Event occurred" object="namespace-1679913964-20606/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-wjgpl" [32mgeneric-resources.sh:415: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
message:no rollbacker has been implemented for "ReplicationController"
no rollbacker has been implemented for "ReplicationController"
unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox0" pausing is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox1" pausing is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox0" resuming is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox1" resuming is not supported
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
W0327 10:46:15.616191 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:15.616242 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_namespace_tests
Running command: run_namespace_tests
+++ Running case: test-cmd.run_namespace_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_namespace_tests
+++ [0327 10:46:16] Testing kubectl(v1:namespaces)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created (dry run)
namespace/my-namespace created (server dry run)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1504: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
query for namespaces had limit param
query for resourcequotas had limit param
query for limitranges had limit param
... skipping 138 lines ...
I0327 10:46:20.947043 23048 shared_informer.go:311] Waiting for caches to sync for resource quota
I0327 10:46:20.947086 23048 shared_informer.go:318] Caches are synced for resource quota
I0327 10:46:21.171076 23048 shared_informer.go:311] Waiting for caches to sync for garbage collector
I0327 10:46:21.171122 23048 shared_informer.go:318] Caches are synced for garbage collector
namespace/my-namespace condition met
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1515: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
... skipping 34 lines ...
namespace "namespace-1679913942-10886" deleted
namespace "namespace-1679913942-26367" deleted
namespace "namespace-1679913943-31932" deleted
namespace "namespace-1679913945-19936" deleted
namespace "namespace-1679913946-27291" deleted
namespace "namespace-1679913964-20606" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:Warning: deleting cluster-scoped resources
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
namespace "my-namespace" deleted
namespace "namespace-1679913772-15052" deleted
... skipping 32 lines ...
namespace "namespace-1679913942-10886" deleted
namespace "namespace-1679913942-26367" deleted
namespace "namespace-1679913943-31932" deleted
namespace "namespace-1679913945-19936" deleted
namespace "namespace-1679913946-27291" deleted
namespace "namespace-1679913964-20606" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:namespace "my-namespace" deleted
namespace/quotas created
core.sh:1522: Successful get namespaces/quotas {{.metadata.name}}: quotas
core.sh:1523: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name "test-quota" }}found{{end}}{{end}}:: :
resourcequota/test-quota created (dry run)
resourcequota/test-quota created (server dry run)
... skipping 9 lines ...
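The quota case above creates the same object in both dry-run modes before the real create; a minimal sketch (namespace and quota names are the test's, the --hard value is an illustrative assumption):

  kubectl create quota test-quota --hard=pods=10 --namespace=quotas --dry-run=client
  kubectl create quota test-quota --hard=pods=10 --namespace=quotas --dry-run=server
  kubectl create quota test-quota --hard=pods=10 --namespace=quotas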
I0327 10:46:23.125032 42144 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/quotas/resourcequotas/test-quota 200 OK in 1 milliseconds
resourcequota "test-quota" deleted
I0327 10:46:23.240787 23048 resource_quota_controller.go:337] "Resource quota has been deleted" key="quotas/test-quota"
I0327 10:46:23.279151 23048 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679913964-20606/busybox0"
I0327 10:46:23.283364 23048 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679913964-20606/busybox1"
namespace "quotas" deleted
W0327 10:46:24.668109 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:24.668151 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:46:26.923929 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:26.923964 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:46:27.901105 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:27.901151 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1544: Successful get namespaces {{range.items}}{{ if eq .metadata.name "other" }}found{{end}}{{end}}:: :
namespace/other created
core.sh:1548: Successful get namespaces/other {{.metadata.name}}: other
core.sh:1552: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}:
pod/valid-pod created
core.sh:1556: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:1558: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Successful
message:error: a resource cannot be retrieved by name across all namespaces
has:a resource cannot be retrieved by name across all namespaces
core.sh:1565: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:1569: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}:
namespace "other" deleted
... skipping 127 lines ...
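The all-namespaces failure above is a deliberate restriction on named gets; a minimal sketch (pod and namespace names are the test's):

  kubectl get pods valid-pod --all-namespaces    # error: a resource cannot be retrieved by name across all namespaces
  kubectl get pods valid-pod --namespace=other   # scoping the named get to one namespace works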
core.sh:921: Successful get secret/secret-string-data --namespace=test-secrets {{.stringData}}: <no value>
secret "secret-string-data" deleted
core.sh:930: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}:
secret "test-secret" deleted
namespace "test-secrets" deleted
I0327 10:46:39.343940 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="other"
W0327 10:46:40.473757 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:40.473798 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:46:41.043107 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:41.043144 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_configmap_tests
Running command: run_configmap_tests
+++ Running case: test-cmd.run_configmap_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 30 lines ...
I0327 10:46:43.654043 43310 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/events?fieldSelector=involvedObject.name%3Dtest-binary-configmap%2CinvolvedObject.namespace%3Dtest-configmaps%2CinvolvedObject.kind%3DConfigMap%2CinvolvedObject.uid%3Dcdc2baa3-a08f-4296-b9db-e603d4fa5a8f&limit=500 200 OK in 1 milliseconds
I0327 10:46:43.655652 43310 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/configmaps/test-configmap 200 OK in 1 milliseconds
I0327 10:46:43.656935 43310 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/events?fieldSelector=involvedObject.name%3Dtest-configmap%2CinvolvedObject.namespace%3Dtest-configmaps%2CinvolvedObject.kind%3DConfigMap%2CinvolvedObject.uid%3D6b6188c0-2a6b-4100-8625-a1d2589cbafa&limit=500 200 OK in 1 milliseconds
configmap "test-configmap" deleted
configmap "test-binary-configmap" deleted
namespace "test-configmaps" deleted
W0327 10:46:46.539946 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:46:46.539981 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0327 10:46:47.285754 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="test-secrets"
+++ exit code: 0
Recording: run_client_config_tests
Running command: run_client_config_tests
+++ Running case: test-cmd.run_client_config_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_client_config_tests
+++ [0327 10:46:49] Creating namespace namespace-1679914009-19166
namespace/namespace-1679914009-19166 created
Context "test" modified.
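The configmap names above come from kubectl create configmap; a minimal sketch of the likely creation calls (the literal value and file path are illustrative assumptions, not taken from the log):

  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
  kubectl create configmap test-binary-configmap --from-file=key=/path/to/binary-file --namespace=test-configmaps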
+++ [0327 10:46:49] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 57 lines ...
Labels:                        <none>
Annotations:                   <none>
Schedule:                      59 23 31 2 *
Concurrency Policy:            Allow
Suspend:                       False
Successful Job History Limit:  3
Failed Job History Limit:      1
Starting Deadline Seconds:     <unset>
Selector:                      <unset>
Parallelism:                   <unset>
Completions:                   <unset>
Pod Template:
  Labels:  <none>
... skipping 57 lines ...
Annotations:      batch.kubernetes.io/job-tracking:
                  cronjob.kubernetes.io/instantiate: manual
Parallelism:      1
Completions:      1
Completion Mode:  NonIndexed
Start Time:       Mon, 27 Mar 2023 10:46:56 +0000
Pods Statuses:    1 Active (0 Ready) / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  batch.kubernetes.io/controller-uid=2f1b3994-76f9-4fe0-9e67-e4b10e1452a9
           batch.kubernetes.io/job-name=test-job
           controller-uid=2f1b3994-76f9-4fe0-9e67-e4b10e1452a9
           job-name=test-job
  Containers:
... skipping 28 lines ...
I0327 10:46:57.203738 44185 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-jobs/events?fieldSelector=involvedObject.kind%3DJob%2CinvolvedObject.uid%3D2f1b3994-76f9-4fe0-9e67-e4b10e1452a9%2CinvolvedObject.name%3Dtest-job%2CinvolvedObject.namespace%3Dtest-jobs&limit=500 200 OK in 1 milliseconds
job.batch "test-job" deleted
I0327 10:46:57.329744 23048 job_controller.go:523] enqueueing job test-jobs/test-job
cronjob.batch "pi" deleted
namespace "test-jobs" deleted
I0327 10:47:00.563544 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="test-service-accounts"
W0327 10:47:00.787478 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:00.787514 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_create_job_tests
Running command: run_create_job_tests
+++ Running case: test-cmd.run_create_job_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 426 lines ...
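The client-config failures above map onto kubectl's standard configuration flags; a sketch of the invocations that produce them (the flag values are the deliberately missing names from the test):

$ kubectl get pods --kubeconfig=missing        # error: stat missing: no such file or directory
$ kubectl get pods --context=missing-context   # context was not found for specified context: missing-context
$ kubectl get pods --cluster=missing-cluster   # error: no server found for cluster "missing-cluster"
$ kubectl get pods --user=missing-user         # error: auth info "missing-user" does not exist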
  type: ClusterIP
status:
  loadBalancer: {}
Successful
message:kubectl-create kubectl-set
has:kubectl-set
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
core.sh:1034: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
service/redis-master selector updated
Successful
message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again
has:Conflict
core.sh:1047: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
service "redis-master" deleted
core.sh:1054: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
core.sh:1058: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0327 10:47:06.997053 19971 alloc.go:330] "allocated clusterIPs" service="default/redis-master" clusterIPs=map[IPv4:10.0.0.195]
... skipping 6 lines ...
I0327 10:47:07.537339 23048 namespace_controller.go:182] "Namespace has been deleted" namespace="test-jobs"
service/service-v1-test replaced
core.sh:1094: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:service-v1-test:
service "redis-master" deleted
service "service-v1-test" deleted
core.sh:1102: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
W0327 10:47:07.958439 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:07.958482 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1106: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0327 10:47:08.159402 19971 alloc.go:330] "allocated clusterIPs" service="default/redis-master" clusterIPs=map[IPv4:10.0.0.71]
service/redis-master created
I0327 10:47:08.365835 19971 alloc.go:330] "allocated clusterIPs" service="default/redis-slave" clusterIPs=map[IPv4:10.0.0.68]
service/redis-slave created
core.sh:1111: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:redis-slave:
... skipping 94 lines ...
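The selector update and the Conflict above come from kubectl set selector; a sketch (the stale resource version supplied to force the conflict is illustrative):

$ kubectl set selector services redis-master role=padawan                       # service/redis-master selector updated
$ kubectl set selector services redis-master role=padawan --resource-version=1  # Error from server (Conflict): the object has been modified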
+++ [0327 10:47:11] Testing kubectl(v1:daemonsets, v1:controllerrevisions)
apps.sh:71: Successful get daemonsets {{range.items}}{{.metadata.name}}:{{end}}:
Flag --record has been deprecated, --record will be removed in the future
daemonset.apps/bind created
apps.sh:75: Successful get controllerrevisions {{range.items}}{{.metadata.annotations}}:{{end}}: map[deprecated.daemonset.template.generation:1 kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{"kubernetes.io/change-cause":"kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true"},"labels":{"service":"bind"},"name":"bind","namespace":"namespace-1679914031-2586"},"spec":{"selector":{"matchLabels":{"service":"bind"}},"template":{"metadata":{"labels":{"service":"bind"}},"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"service","operator":"In","values":["bind"]}]},"namespaces":[],"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"image":"registry.k8s.io/pause:2.0","name":"kubernetes-pause"}]}},"updateStrategy":{"rollingUpdate":{"maxUnavailable":"10%"},"type":"RollingUpdate"}}} kubernetes.io/change-cause:kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true]:
W0327 10:47:12.121335 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:12.121379 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
daemonset.apps/bind skipped rollback (current template already matches revision 1)
apps.sh:78: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0:
apps.sh:79: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
Flag --record has been deprecated, --record will be removed in the future
daemonset.apps/bind configured
apps.sh:82: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:latest:
... skipping 181 lines ...
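The --record flag used above is what populates the change-cause shown in the rollout history just below; a sketch of the sequence the test drives:

$ kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record   # deprecated, but still records kubernetes.io/change-cause
$ kubectl rollout history daemonset/bind                                 # REVISION / CHANGE-CAUSE table
$ kubectl rollout undo daemonset/bind --to-revision=1000000              # error: unable to find specified revision 1000000 in history
$ kubectl rollout undo daemonset/bind --to-revision=1                    # daemonset.apps/bind rolled back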
message:daemonset.apps/bind
REVISION  CHANGE-CAUSE
2         kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
3         kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
has:3         kubectl apply
Successful
message:error: unable to find specified revision 1000000 in history
has:unable to find specified revision
apps.sh:122: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0:
apps.sh:123: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
daemonset.apps/bind rolled back
apps.sh:126: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:latest:
apps.sh:127: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
... skipping 60 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 18 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 12 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 27 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 11 lines ...
Namespace:    namespace-1679914034-19750
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v4
... skipping 25 lines ...
core.sh:1240: Successful get rc frontend {{.spec.replicas}}: 3
replicationcontroller/frontend scaled
E0327 10:47:15.831635 23048 replica_set.go:220] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1679914034-19750 22882689-e6af-4036-bd4b-170cf7616075 2246 2 2023-03-27 10:47:14 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kubectl Update v1 <nil> FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {kube-controller-manager Update v1 2023-03-27 10:47:14 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kubectl-create Update v1 2023-03-27 10:47:14 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:selector":{},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}] []} [] [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc002d39388 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}
I0327 10:47:15.835918 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-wdzdw"
core.sh:1244: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1248: Successful get rc frontend {{.spec.replicas}}: 2
error: Expected replicas to be 3, was 2
core.sh:1252: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1256: Successful get rc frontend {{.spec.replicas}}: 2
replicationcontroller/frontend scaled
I0327 10:47:16.210483 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-sr6dg"
core.sh:1260: Successful get rc frontend {{.spec.replicas}}: 3
core.sh:1264: Successful get rc frontend {{.spec.replicas}}: 3
... skipping 70 lines ...
I0327 10:47:18.661620 19971 alloc.go:330] "allocated clusterIPs" service="namespace-1679914034-19750/expose-test-deployment" clusterIPs=map[IPv4:10.0.0.83]
Successful
message:service/expose-test-deployment exposed
has:service/expose-test-deployment exposed
service "expose-test-deployment" deleted
Successful
message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed
has:invalid deployment: no selectors
deployment.apps/nginx-deployment created
I0327 10:47:18.964225 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7df65dc9f4 to 3"
I0327 10:47:18.968000 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-2mmfc"
I0327 10:47:18.972607 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-vb6kk"
I0327 10:47:18.972646 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-6txk7"
... skipping 24 lines ...
pod "valid-pod" deleted
service "frontend" deleted
service "frontend-2" deleted
service "frontend-3" deleted
service "frontend-4" deleted
Successful
message:error: cannot expose a Node
has:cannot expose
Successful
message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters
has:metadata.name: Invalid value
I0327 10:47:20.540563 19971 alloc.go:330] "allocated clusterIPs" service="namespace-1679914034-19750/kubernetes-serve-hostname-testing-sixty-three-characters-in-len" clusterIPs=map[IPv4:10.0.0.241]
Successful
... skipping 32 lines ...
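The replica precondition failure above, and the hpa lines just below, come from kubectl scale and kubectl autoscale; a sketch:

$ kubectl scale rc frontend --replicas=2                           # unconditional scale
$ kubectl scale rc frontend --current-replicas=3 --replicas=3      # error: Expected replicas to be 3, was 2
$ kubectl autoscale rc frontend --min=1 --max=2 --cpu-percent=70   # hpa frontend: 1 2 70
$ kubectl autoscale rc frontend                                    # error: required flag(s) "max" not set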
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1436: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 70
horizontalpodautoscaler.autoscaling "frontend" deleted
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1440: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80
horizontalpodautoscaler.autoscaling "frontend" deleted
error: required flag(s) "max" not set
replicationcontroller "frontend" deleted
core.sh:1449: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
... skipping 24 lines ...
        limits:
          cpu: 300m
        requests:
          cpu: 300m
      terminationGracePeriodSeconds: 0
status: {}
Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found
deployment.apps/nginx-deployment-resources created
I0327 10:47:22.916902 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-5f79767bf9 to 3"
I0327 10:47:22.920904 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-4hkv4"
I0327 10:47:22.924825 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-7khfr"
I0327 10:47:22.924953 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-qmrpc"
core.sh:1455: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources:
core.sh:1456: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
core.sh:1457: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment-resources resource requirements updated
I0327 10:47:23.184337 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-77d775b4f9 to 1"
I0327 10:47:23.187943 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-77d775b4f9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-77d775b4f9-qx9j2"
core.sh:1460: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m:
core.sh:1461: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m:
error: unable to find container named redis
deployment.apps/nginx-deployment-resources resource requirements updated
I0327 10:47:23.444171 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-5f79767bf9 to 2 from 3"
I0327 10:47:23.449960 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-5f79767bf9-4hkv4"
I0327 10:47:23.452754 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-688f8b78b5 to 1 from 0"
I0327 10:47:23.458265 23048 event.go:307] "Event occurred" object="namespace-1679914034-19750/nginx-deployment-resources-688f8b78b5" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-688f8b78b5-26vmb"
core.sh:1466: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
... skipping 155 lines ...
    status: "True"
    type: Progressing
  observedGeneration: 4
  replicas: 4
  unavailableReplicas: 4
  updatedReplicas: 1
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
core.sh:1477: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
core.sh:1478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m:
core.sh:1479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m:
... skipping 46 lines ...
           pod-template-hash=859689d794
Annotations:    deployment.kubernetes.io/desired-replicas: 1
                deployment.kubernetes.io/max-replicas: 2
                deployment.kubernetes.io/revision: 1
Controlled By:  Deployment/test-nginx-apps
Replicas:       1 current / 1 desired
Pods Status:    0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=test-nginx-apps
           pod-template-hash=859689d794
  Containers:
   nginx:
    Image:  registry.k8s.io/nginx:test-cmd
... skipping 123 lines ...
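The resource-requirement updates above are kubectl set resources; a sketch (the container names follow the test's deployment, and the exact flag spellings in the test scripts may differ):

$ kubectl set resources deployment nginx-deployment-resources --limits=cpu=200m
$ kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=200m                     # error: unable to find container named redis
$ kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=300m --requests=cpu=300m
$ kubectl set resources deployment nginx-deployment-resources --local --limits=cpu=100m -o yaml              # error: you must specify resources by --filename when --local is set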
apps.sh:340: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
    Image:  registry.k8s.io/nginx:test-cmd
deployment.apps/nginx rolled back (server dry run)
apps.sh:344: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx rolled back
apps.sh:348: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
error: unable to find specified revision 1000000 in history
apps.sh:351: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
deployment.apps/nginx rolled back
apps.sh:355: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx paused
error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again
error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first)
deployment.apps/nginx resumed
deployment.apps/nginx rolled back
    deployment.kubernetes.io/revision-history: 1,3
error: desired revision (3) is different from the running revision (5)
deployment.apps/nginx restarted
I0327 10:47:32.143000 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-6b9cd9ccf6 to 0 from 1"
I0327 10:47:32.148101 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-6b9cd9ccf6" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-6b9cd9ccf6-sq25v"
I0327 10:47:32.151493 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-7976dc9f9f to 1 from 0"
I0327 10:47:32.155317 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-7976dc9f9f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-7976dc9f9f-xdbrn"
Successful
... skipping 80 lines ...
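The rollback sequence above maps onto the kubectl rollout subcommands; a sketch of the order the test drives them in:

$ kubectl rollout undo deployment/nginx --dry-run=server        # rolled back (server dry run)
$ kubectl rollout undo deployment/nginx
$ kubectl rollout undo deployment/nginx --to-revision=1000000   # error: unable to find specified revision 1000000 in history
$ kubectl rollout pause deployment/nginx
$ kubectl rollout undo deployment/nginx                         # error: you cannot rollback a paused deployment
$ kubectl rollout restart deployment/nginx                      # error: can't restart paused deployment
$ kubectl rollout resume deployment/nginx
$ kubectl rollout restart deployment/nginx                      # triggers the scale down/up events logged above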
apps.sh:399: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
I0327 10:47:34.440197 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6444b54576 to 1"
I0327 10:47:34.444663 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-6444b54576" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6444b54576-d7zws"
apps.sh:402: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:403: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
error: unable to find container named "redis"
deployment.apps/nginx-deployment image updated
apps.sh:408: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
apps.sh:409: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
apps.sh:412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:413: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
... skipping 47 lines ...
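The image updates above are kubectl set image; a sketch (the wildcard form is what allows updating every container at once):

$ kubectl set image deployment nginx-deployment nginx=registry.k8s.io/nginx:1.7.9
$ kubectl set image deployment nginx-deployment redis=redis                          # error: unable to find container named "redis"
$ kubectl set image deployment nginx-deployment '*'=registry.k8s.io/nginx:test-cmd   # wildcard: all containers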
I0327 10:47:37.141471 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6bdc9df444 to 1 from 0"
I0327 10:47:37.145980 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-6bdc9df444" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6bdc9df444-ldhq9"
I0327 10:47:37.155875 23048 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679914034-19750/frontend"
deployment.apps/nginx-deployment env updated
I0327 10:47:37.217111 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-57bf7fbc68 to 0 from 1"
I0327 10:47:37.225748 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-5446b4888c to 1 from 0"
E0327 10:47:37.231844 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-57bf7fbc68" failed with Operation cannot be fulfilled on replicasets.apps "nginx-deployment-57bf7fbc68": the object has been modified; please apply your changes to the latest version and try again
Warning: key username transferred to USERNAME
deployment.apps/nginx-deployment env updated
W0327 10:47:37.304360 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:37.304395 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0327 10:47:37.325729 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-6bf769bd to 0 from 1"
I0327 10:47:37.334164 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-57bf7fbc68-5twld"
Warning: key password transferred to PASSWORD
Warning: key username transferred to USERNAME
deployment.apps/nginx-deployment env updated
deployment.apps/nginx-deployment env updated
Successful
message:error: standard input cannot be used for multiple arguments
has:standard input cannot be used for multiple arguments
I0327 10:47:37.475578 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-56795f96bc to 1"
I0327 10:47:37.483694 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-6bf769bd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-6bf769bd-pl6mf"
deployment.apps "nginx-deployment" deleted
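The env updates and key-transfer warnings above are kubectl set env; a sketch (resource names follow the test objects, and the key casing behavior is what produces the warnings):

$ kubectl set env deployment nginx-deployment env=prod
$ kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config   # Warning: key username transferred to USERNAME
$ kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret      # Warning: key password transferred to PASSWORD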
configmap "test-set-env-config" deleted
E0327 10:47:37.629997 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-5446b4888c" failed with Operation cannot be fulfilled on replicasets.apps "nginx-deployment-5446b4888c": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679914044-262/nginx-deployment-5446b4888c, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 6401d1a2-402f-4c96-8307-29cad6456fa0, UID in object meta:
secret "test-set-env-secret" deleted
I0327 10:47:37.682641 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-56795f96bc" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-56795f96bc-pqp4x"
apps.sh:474: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
E0327 10:47:37.830391 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-57bf7fbc68" failed with replicasets.apps "nginx-deployment-57bf7fbc68" not found
deployment.apps/nginx-deployment created
I0327 10:47:37.917967 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57bf7fbc68 to 3"
E0327 10:47:37.979951 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-6bf769bd" failed with replicasets.apps "nginx-deployment-6bf769bd" not found
apps.sh:477: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment:
apps.sh:478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
E0327 10:47:38.080228 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-ffc86458c" failed with replicasets.apps "nginx-deployment-ffc86458c" not found
apps.sh:479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
I0327 10:47:38.133613 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-59gpb"
deployment.apps/nginx-deployment image updated
E0327 10:47:38.180060 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-56795f96bc" failed with replicasets.apps "nginx-deployment-56795f96bc" not found
I0327 10:47:38.180439 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6444b54576 to 1"
I0327 10:47:38.231722 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-lw4b7"
apps.sh:482: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:483: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
I0327 10:47:38.332713 23048 event.go:307] "Event occurred" object="namespace-1679914044-262/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-b69fj"
Successful
... skipping 196 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_rs_tests
+++ [0327 10:47:38] Creating namespace namespace-1679914058-28642
namespace/namespace-1679914058-28642 created
Context "test" modified.
+++ [0327 10:47:38] Testing kubectl(v1:replicasets)
E0327 10:47:38.780558 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-57bf7fbc68" failed with replicasets.apps "nginx-deployment-57bf7fbc68" not found
apps.sh:645: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
E0327 10:47:38.829981 23048 replica_set.go:544] sync "namespace-1679914044-262/nginx-deployment-6444b54576" failed with replicasets.apps "nginx-deployment-6444b54576" not found
replicaset.apps/frontend created
I0327 10:47:38.993604 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-xwd5q"
+++ [0327 10:47:38] Deleting rs
I0327 10:47:38.996824 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-khfz4"
I0327 10:47:39.035444 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-mhpg2"
replicaset.apps "frontend" deleted
apps.sh:651: Successful get pods -l tier=frontend {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:655: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
E0327 10:47:39.232820 23048 replica_set.go:544] sync "namespace-1679914058-28642/frontend" failed with replicasets.apps "frontend" not found
replicaset.apps/frontend created
I0327 10:47:39.363407 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-zc4xb"
I0327 10:47:39.383577 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-tr9ns"
Waiting for Get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}} : expected: php-redis:php-redis:php-redis:, got: php-redis:php-redis:
I0327 10:47:39.432068 23048 event.go:307] "Event occurred" object="namespace-1679914058-28642/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-qg27v"
apps.sh:659: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
+++ [0327 10:47:39] Deleting rs
replicaset.apps "frontend" deleted
E0327 10:47:39.680892 23048 replica_set.go:544] sync "namespace-1679914058-28642/frontend" failed with replicasets.apps "frontend" not found
apps.sh:663: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:665: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
pod "frontend-qg27v" deleted
pod "frontend-tr9ns" deleted
pod "frontend-zc4xb" deleted
apps.sh:668: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
... skipping 15 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 18 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 12 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 25 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 11 lines ...
Namespace:    namespace-1679914058-28642
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:  gcr.io/google_samples/gb-frontend:v3
... skipping 226 lines ...
horizontalpodautoscaler.autoscaling/frontend autoscaled
apps.sh:808: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80
Successful
message:kubectl-autoscale
has:kubectl-autoscale
horizontalpodautoscaler.autoscaling "frontend" deleted
error: required flag(s) "max" not set
replicaset.apps "frontend" deleted
+++ exit code: 0
Recording: run_stateful_set_tests
Running command: run_stateful_set_tests
+++ Running case: test-cmd.run_stateful_set_tests
... skipping 265 lines ...
message:statefulset.apps/nginx
REVISION  CHANGE-CAUSE
2         kubectl apply --filename=hack/testdata/rollingupdate-statefulset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
3         kubectl apply --filename=hack/testdata/rollingupdate-statefulset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
has:3         kubectl apply
Successful
message:error: unable to find specified revision 1000000 in history
has:unable to find specified revision
apps.sh:570: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.7:
apps.sh:571: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
statefulset.apps/nginx rolled back
apps.sh:574: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.8:
apps.sh:575: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/pause:2.0:
... skipping 87 lines ...
Name:         mock
Namespace:    namespace-1679914070-14714
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:  registry.k8s.io/pause:3.9
    Port:   9949/TCP
... skipping 61 lines ...
Name:         mock
Namespace:    namespace-1679914070-14714
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:  registry.k8s.io/pause:3.9
    Port:   9949/TCP
... skipping 61 lines ...
Name:         mock
Namespace:    namespace-1679914070-14714
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:  registry.k8s.io/pause:3.9
    Port:   9949/TCP
... skipping 42 lines ...
Namespace:    namespace-1679914070-14714
Selector:     app=mock
Labels:       app=mock
              status=replaced
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:  registry.k8s.io/pause:3.9
    Port:   9949/TCP
... skipping 11 lines ...
Namespace:    namespace-1679914070-14714
Selector:     app=mock2
Labels:       app=mock2
              status=replaced
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock2
  Containers:
   mock-container:
    Image:  registry.k8s.io/pause:3.9
    Port:   9949/TCP
... skipping 4 lines ...
Events:
  Type    Reason            Age   From                    Message
  ----    ------            ----  ----                    -------
  Normal  SuccessfulCreate  0s    replication-controller  Created pod: mock2-2xqrv
replicationcontroller "mock" deleted
replicationcontroller "mock2" deleted
W0327 10:47:56.740626 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:56.740656 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
replicationcontroller/mock replaced
replicationcontroller/mock2 replaced
I0327 10:47:56.805729 23048 event.go:307] "Event occurred" object="namespace-1679914070-14714/mock" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock-9j2dr"
I0327 10:47:56.809394 23048 event.go:307] "Event occurred" object="namespace-1679914070-14714/mock2" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: mock2-v6xbv"
generic-resources.sh:102: Successful get rc mock {{.metadata.labels.status}}: replaced
generic-resources.sh:104: Successful get rc mock2 {{.metadata.labels.status}}: replaced
... skipping 67 lines ...
generic-resources.sh:114: Successful get services mock {{.metadata.labels.status}}: edited
generic-resources.sh:116: Successful get services mock2 {{.metadata.labels.status}}: edited
service/mock labeled
service/mock2 labeled
generic-resources.sh:134: Successful get services mock {{.metadata.labels.labeled}}: true
generic-resources.sh:136: Successful get services mock2 {{.metadata.labels.labeled}}: true
W0327 10:47:59.153855 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:47:59.153902 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
service/mock annotate
service/mock2 annotate
generic-resources.sh:153: Successful get services mock {{.metadata.annotations.annotated}}: true
generic-resources.sh:155: Successful get services mock2 {{.metadata.annotations.annotated}}: true
service "mock" deleted
service "mock2" deleted
... skipping 23 lines ...
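The statefulset history and rollback checks in the stateful-set case above go through the same kubectl rollout machinery as deployments and daemonsets; a sketch:

$ kubectl rollout history statefulset/nginx                      # REVISION / CHANGE-CAUSE table
$ kubectl rollout undo statefulset/nginx --to-revision=1000000   # error: unable to find specified revision 1000000 in history
$ kubectl rollout undo statefulset/nginx                         # statefulset.apps/nginx rolled back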
storage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
I0327 10:48:00.847780 23048 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679914058-28642/frontend"
persistentvolume/pv0001 created
storage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
persistentvolume "pv0001" deleted
persistentvolume/pv0002 created
E0327 10:48:01.319409 23048 pv_protection_controller.go:113] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again
storage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002:
persistentvolume "pv0002" deleted
persistentvolume/pv0003 created
E0327 10:48:01.777537 23048 pv_protection_controller.go:113] PV pv0003 failed with : Operation cannot be fulfilled on persistentvolumes "pv0003": the object has been modified; please apply your changes to the latest version and try again
storage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003:
query for persistentvolumes had limit param
query for events had limit param
query for persistentvolumes had user-specified limit param
Successful describe persistentvolumes verbose logs:
I0327 10:48:01.885052 54598 loader.go:373] Config loaded from file: /tmp/tmp.Yu0BLSZ4TL/.kube/config
I0327 10:48:01.889811 54598 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 4 milliseconds
I0327 10:48:01.896131 54598 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes?limit=500 200 OK in 1 milliseconds
I0327 10:48:01.898296 54598 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes/pv0003 200 OK in 1 milliseconds
I0327 10:48:01.907325 54598 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.uid%3Dfb044f9b-f918-41a9-a170-befe750a3e99%2CinvolvedObject.name%3Dpv0003%2CinvolvedObject.namespace%3D%2CinvolvedObject.kind%3DPersistentVolume&limit=500 200 OK in 8 milliseconds
persistentvolume "pv0003" deleted
storage.sh:44: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
persistentvolume/pv0001 created
E0327 10:48:02.403437 23048 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
storage.sh:47: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
persistentvolume "pv0001" deleted
has:Warning: deleting cluster-scoped resources
Successful
... skipping 36 lines ...
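A minimal PersistentVolume/PersistentVolumeClaim pair of the kind these storage checks exercise (a sketch; capacity, access mode, and host path are illustrative stand-ins for the test's own manifests):

$ cat <<EOF | kubectl create -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0001
spec:
  capacity:
    storage: 3Gi                    # illustrative size
  accessModes: ["ReadWriteOnce"]
  hostPath:
    path: /tmp/pv0001               # illustrative backing path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim-3
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi                  # illustrative request
EOF
$ kubectl delete pv pv0001 --namespace=default   # PVs are cluster-scoped, hence the warning above when a namespace is supplied

In the test the claims are created while no PV exists and no storage class is set, so they stay Pending and keep emitting the FailedBinding events logged below.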
storage.sh:75: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-2:
persistentvolumeclaim "myclaim-2" deleted
I0327 10:48:03.823790 23048 event.go:307] "Event occurred" object="namespace-1679914082-31632/myclaim-2" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
persistentvolumeclaim/myclaim-3 created
I0327 10:48:04.131523 23048 event.go:307] "Event occurred" object="namespace-1679914082-31632/myclaim-3" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
I0327 10:48:04.134690 23048 event.go:307] "Event occurred" object="namespace-1679914082-31632/myclaim-3" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
W0327 10:48:04.168477 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:48:04.168512 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
storage.sh:79: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}: myclaim-3:
persistentvolumeclaim "myclaim-3" deleted
I0327 10:48:04.257709 23048 event.go:307] "Event occurred" object="namespace-1679914082-31632/myclaim-3" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="FailedBinding" message="no persistent volumes available for this claim and no storage class is set"
storage.sh:82: Successful get pvc {{range.items}}{{.metadata.name}}:{{end}}:
+++ exit code: 0
Recording: run_storage_class_tests
... skipping 40 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 35 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 31 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 42 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 30 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Mon, 27 Mar 2023 10:42:50 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Mon, 27 Mar 2023 10:42:50 +0000   Mon, 27 Mar 2023 10:43:50 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 172 lines ...
yes
has:the server doesn't have a resource type
Successful
message:yes
has:yes
Successful
message:error: --subresource can not be used with NonResourceURL
has:subresource can not be used with NonResourceURL
Successful
Successful
message:yes
0
has:0
... skipping 62 lines ...
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]}
legacy-script.sh:886: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB:
legacy-script.sh:887: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R:
legacy-script.sh:888: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB:
legacy-script.sh:889: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR:
Successful
message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole
has:only rbac.authorization.k8s.io/v1 is supported
rolebinding.rbac.authorization.k8s.io "testing-RB" deleted
role.rbac.authorization.k8s.io "testing-R" deleted
Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "testing-CR" deleted
clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted
... skipping 24 lines ...
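For context, the bare "yes"/"0" outputs and the NonResourceURL error asserted above come from kubectl auth can-i checks in the auth test script. A minimal sketch of the kind of invocations involved (the namespace and verbs here are illustrative, not taken verbatim from this run):

  # Ask the API server whether the current user may list configmaps.
  # Prints "yes" or "no" and sets the exit code to match.
  kubectl auth can-i list configmaps --namespace some-other-random

  # --subresource only makes sense for resource URLs; combining it with a
  # non-resource URL is rejected client-side, which is what the error
  # "--subresource can not be used with NonResourceURL" above asserts.
  kubectl auth can-i get /healthz --subresource=status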
discovery.sh:236: Successful get all -l app=cassandra {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra:
pod "cassandra-dfl6x" deleted
I0327 10:48:13.089143 23048 event.go:307] "Event occurred" object="namespace-1679914092-19360/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-rbs5z"
pod "cassandra-ntw2w" deleted
I0327 10:48:13.104897 23048 event.go:307] "Event occurred" object="namespace-1679914092-19360/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-ld5bj"
replicationcontroller "cassandra" deleted
E0327 10:48:13.122909 23048 replica_set.go:544] sync "namespace-1679914092-19360/cassandra" failed with replicationcontrollers "cassandra" not found
service "cassandra" deleted
+++ exit code: 0
Recording: run_kubectl_explain_tests
Running command: run_kubectl_explain_tests
+++ Running case: test-cmd.run_kubectl_explain_tests
... skipping 115 lines ...
+++ exit code: 0
Recording: run_crd_deletion_recreation_tests
Running command: run_crd_deletion_recreation_tests
+++ Running case: test-cmd.run_crd_deletion_recreation_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
W0327 10:48:13.967473 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:48:13.967515 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ command: run_crd_deletion_recreation_tests
+++ [0327 10:48:13] Creating namespace namespace-1679914093-12063
namespace/namespace-1679914093-12063 created
Context "test" modified.
+++ [0327 10:48:14] Testing resource creation, deletion, and re-creation
I0327 10:48:14.248617 19971 handler.go:232] Adding GroupVersion test.com v1 to ResourceManager
... skipping 380 lines ...
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679914093-12063 namespace.
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679914093-12063 namespace.
error: 1 warning received
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679914093-12063 namespace.
error: 1 warning received
has:error: 1 warning received
I0327 10:48:34.738054 19971 handler.go:232] Adding GroupVersion example.com v1 to ResourceManager
I0327 10:48:34.738106 19971 handler.go:232] Adding GroupVersion example.com v1beta1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io "deprecated.example.com" deleted
I0327 10:48:34.742607 19971 handler.go:232] Adding GroupVersion example.com v1 to ResourceManager
I0327 10:48:34.742658 19971 handler.go:232] Adding GroupVersion example.com v1beta1 to ResourceManager
+++ exit code: 0
... skipping 75 lines ...
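The deprecation warnings asserted above are produced by a CRD that serves both v1 and v1beta1 of example.com and marks v1beta1 deprecated. The actual test fixture is not shown in this log; the following is a minimal hedged sketch of a CRD that would emit that warning, applied via a shell heredoc (the schema is a placeholder):

  kubectl apply -f - <<'EOF'
  apiVersion: apiextensions.k8s.io/v1
  kind: CustomResourceDefinition
  metadata:
    name: deprecated.example.com
  spec:
    group: example.com
    names:
      kind: DeprecatedKind
      plural: deprecated
    scope: Namespaced
    versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
    - name: v1beta1
      served: true
      storage: false
      deprecated: true        # requests to this version get a Warning header
      deprecationWarning: "example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind"
      schema:
        openAPIV3Schema:
          type: object
  EOF

The "error: 1 warning received" lines correspond to kubectl's --warnings-as-errors mode, which turns any server-sent warning into a non-zero exit.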
Successful
message:valid-pod:
has:valid-pod:
Successful
message:valid-pod:
has:valid-pod:
W0327 10:48:36.036217 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:48:36.036257 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:valid-pod:
has:valid-pod:
Successful
message:scale-1:
has:scale-1:
... skipping 471 lines ...
evicting pod namespace-1679914121-7894/test-pod-1 (server dry run)
node/127.0.0.1 drained (server dry run)
node-management.sh:129: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1:
node-management.sh:130: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:134: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:136: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
W0327 10:48:43.796823 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:48:43.796857 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
node/127.0.0.1 cordoned (dry run)
Warning: deleting Pods that declare no controller: namespace-1679914121-7894/test-pod-1
evicting pod namespace-1679914121-7894/test-pod-1 (dry run)
node/127.0.0.1 drained (dry run)
node/127.0.0.1 cordoned (server dry run)
Warning: deleting Pods that declare no controller: namespace-1679914121-7894/test-pod-1
evicting pod namespace-1679914121-7894/test-pod-1 (server dry run)
node/127.0.0.1 drained (server dry run)
node-management.sh:140: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Warning: deleting Pods that declare no controller: namespace-1679914121-7894/test-pod-1
W0327 10:48:55.219554 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:48:55.219591 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:49:13.770337 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:49:13.770374 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:node/127.0.0.1 cordoned
evicting pod namespace-1679914121-7894/test-pod-1
pod "test-pod-1" has DeletionTimestamp older than 1 seconds, skipping
node/127.0.0.1 drained
has:evicting pod .*/test-pod-1
... skipping 14 lines ...
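The cordon/drain sequence above exercises both client-side and server-side dry runs. A minimal sketch of the commands behind this output, using the node name from this run (the flags are standard kubectl; --force is what acknowledges the "deleting Pods that declare no controller" warning for bare pods):

  # Client-side dry run: kubectl computes the cordon/eviction plan locally
  # and prints "(dry run)" markers without changing anything.
  kubectl drain 127.0.0.1 --force --dry-run=client

  # Server-side dry run: the API server runs the full request path,
  # including admission, but persists nothing ("(server dry run)").
  kubectl drain 127.0.0.1 --force --dry-run=server

  # Remove the cordon afterwards.
  kubectl uncordon 127.0.0.1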
message:node/127.0.0.1 already uncordoned (server dry run)
has:already uncordoned
node-management.sh:161: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node/127.0.0.1 labeled
node-management.sh:166: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
Successful
message:error: cannot specify both a node name and a --selector option
See 'kubectl drain -h' for help and examples
has:cannot specify both a node name
node-management.sh:172: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
node-management.sh:174: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:176: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Successful
... skipping 78 lines ...
Warning: deleting Pods that declare no controller: namespace-1679914121-7894/test-pod-1, namespace-1679914121-7894/test-pod-2
evicting pod namespace-1679914121-7894/test-pod-1 (dry run)
evicting pod namespace-1679914121-7894/test-pod-2 (dry run)
node/127.0.0.1 drained (dry run)
has:/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK
Successful
message:error: USAGE: cordon NODE [flags]
See 'kubectl cordon -h' for help and examples
has:error\: USAGE\: cordon NODE
node/127.0.0.1 already uncordoned
Successful
message:error: You must provide one or more resources by argument or filename.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
   '<resource> <name>'
   '<resource>'
has:must provide one or more resources
... skipping 18 lines ...
+++ [0327 10:49:18] Testing kubectl plugins
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/version/kubectl-version
  - warning: kubectl-version overwrites existing command: "kubectl version"
error: one plugin warning was found
has:kubectl-version overwrites existing command: "kubectl version"
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/kubectl-foo
test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo
  - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo
error: one plugin warning was found
has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/kubectl-foo
has:plugins are available
Successful
message:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping...
error: unable to find any kubectl plugins in your PATH
has:unable to find any kubectl plugins in your PATH
Successful
message:I am plugin foo
has:plugin foo
Successful
message:I am plugin bar called with args
test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1
... skipping 13 lines ...
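A kubectl plugin is nothing more than an executable named kubectl-<name> somewhere on PATH; kubectl plugin list produced the discovery warnings asserted above. A minimal sketch (the install path here is hypothetical, not the test fixture location):

  # Any executable named kubectl-<something> on PATH becomes a plugin.
  cat > /usr/local/bin/kubectl-foo <<'EOF'
  #!/bin/sh
  echo "I am plugin foo"
  EOF
  chmod +x /usr/local/bin/kubectl-foo

  kubectl plugin list   # lists kubectl-foo; warns when a plugin overwrites a
                        # built-in command or is overshadowed by an earlier
                        # PATH entry with the same name
  kubectl foo           # dispatches to the plugin: prints "I am plugin foo"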
+++ Running case: test-cmd.run_impersonation_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_impersonation_tests
+++ [0327 10:49:18] Testing impersonation
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
certificatesigningrequest.certificates.k8s.io/foo created
authorization.sh:60: Successful get csr/foo {{.spec.username}}: user1
authorization.sh:61: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated
certificatesigningrequest.certificates.k8s.io "foo" deleted
certificatesigningrequest.certificates.k8s.io/foo created
... skipping 19 lines ...
I0327 10:49:20.100366 23048 event.go:307] "Event occurred" object="namespace-1679914159-2746/test-1" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-1-7697bf65f7 to 1"
I0327 10:49:20.105609 23048 event.go:307] "Event occurred" object="namespace-1679914159-2746/test-1-7697bf65f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-7697bf65f7-hd8f4"
deployment.apps/test-2 created
I0327 10:49:20.164237 23048 event.go:307] "Event occurred" object="namespace-1679914159-2746/test-2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-675f68f47d to 1"
I0327 10:49:20.169076 23048 event.go:307] "Event occurred" object="namespace-1679914159-2746/test-2-675f68f47d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-675f68f47d-g2jnm"
wait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2,
W0327 10:49:21.100107 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:49:21.100155 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:49:22.181638 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:49:22.181680 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0327 10:49:31.273009 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:49:31.273045 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: timed out waiting for the condition on deployments/test-1
has:timed out
deployment.apps "test-1" deleted
deployment.apps "test-2" deleted
Successful
message:deployment.apps/test-1 condition met
deployment.apps/test-2 condition met
... skipping 39 lines ...
debug.sh:49: Successful get pod/target-copy {{range.spec.containers}}{{.name}}:{{end}}: target:debug-container:
debug.sh:50: Successful get pod/target-copy {{range.spec.containers}}{{.image}}:{{end}}: registry.k8s.io/nginx:1.7.9:busybox:
pod "target" deleted
pod "target-copy" deleted
pod/target created
debug.sh:56: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:
W0327 10:49:55.019906 23048 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0327 10:49:55.019954 23048 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
debug.sh:60: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target-copy:
debug.sh:61: Successful get pod/target-copy {{range.spec.containers}}{{.name}}:{{end}}: target:debug-container:
debug.sh:62: Successful get pod/target-copy {{range.spec.containers}}{{.image}}:{{end}}: registry.k8s.io/nginx:1.7.9:busybox:
pod "target-copy" deleted
pod/target created
debug.sh:68: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:
... skipping 165 lines ...
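The debug.sh assertions above check kubectl debug in pod-copy mode: it clones the target pod as target-copy and adds a debug container alongside the original one. A minimal sketch consistent with the names and images in this log (the exact debug.sh invocation may differ; these are standard kubectl debug flags):

  # Create the target pod, then a debugging copy with a busybox container
  # named debug-container added next to the original "target" container.
  kubectl run target --image=registry.k8s.io/nginx:1.7.9
  kubectl debug target --image=busybox --container=debug-container --copy-to=target-copy

  # The copy now has two containers, matching the debug.sh:49/50 assertions:
  kubectl get pod target-copy \
    -o jsonpath='{range .spec.containers[*]}{.name}:{end}'   # target:debug-container: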