PR | alculquicondor: Fix deleting UIDs tracking expectations
Result | ABORTED |
Tests | 0 failed / 135 succeeded |
Started |
Elapsed | 47m13s |
Revision | b1572d249f0ac66a8f78a969b7b39b1e2daf75b7 |
Refs | 111721
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta2
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta3
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_deletion_recreation_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_deprecated_api_tests
test-cmd run_exec_credentials_interactive_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_create_validate_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_events_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_results_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_recursive_resources_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
... skipping 74 lines ...
Recording: record_command_canary
Running command: record_command_canary

+++ Running case: test-cmd.record_command_canary
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 164: bogus-expected-to-fail: command not found
!!! [0805 16:11:54] Call tree:
!!! [0805 16:11:54] 1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0805 16:11:54] 2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0805 16:11:54] 3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:140 juLog(...)
!!! [0805 16:11:54] 4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:168 record_command(...)
!!! [0805 16:11:54] 5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0805 16:11:54] Running kubeadm tests
+++ [0805 16:11:55] Building go targets for linux/amd64
    k8s.io/kubernetes/hack/make-rules/helpers/go2make (non-static)
+++ [0805 16:11:58] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kubeadm (static)
+++ [0805 16:12:43] Building go targets for linux/amd64
... skipping 220 lines ...
    k8s.io/kubernetes/hack/make-rules/helpers/go2make (non-static)
+++ [0805 16:15:46] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kube-controller-manager (static)
+++ [0805 16:16:15] Generate kubeconfig for controller-manager
+++ [0805 16:16:15] Starting controller-manager
I0805 16:16:16.417857 56975 serving.go:348] Generated self-signed cert in-memory
W0805 16:16:16.833050 56975 authentication.go:423] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0805 16:16:16.833096 56975 authentication.go:317] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0805 16:16:16.833108 56975 authentication.go:341] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0805 16:16:16.833129 56975 authorization.go:226] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0805 16:16:16.833153 56975 authorization.go:194] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0805 16:16:16.833184 56975 controllermanager.go:178] Version: v1.25.0-beta.0.9+1ddbf2f222c044
I0805 16:16:16.833202 56975 controllermanager.go:180] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0805 16:16:16.834728 56975 secure_serving.go:210] Serving securely on [::]:10257
I0805 16:16:16.834856 56975 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0805 16:16:16.835054 56975 leaderelection.go:248] attempting to acquire leader lease kube-system/kube-controller-manager...
... skipping 96 lines ...
I0805 16:16:16.874964 56975 controllermanager.go:602] Started "disruption"
I0805 16:16:16.875038 56975 disruption.go:421] Sending events to api server.
I0805 16:16:16.875085 56975 disruption.go:432] Starting disruption controller
I0805 16:16:16.875093 56975 shared_informer.go:255] Waiting for caches to sync for disruption
W0805 16:16:16.875171 56975 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0805 16:16:16.875199 56975 controllermanager.go:602] Started "pv-protection"
E0805 16:16:16.875344 56975 core.go:210] failed to start cloud node lifecycle controller: no cloud provider provided
W0805 16:16:16.875357 56975 controllermanager.go:580] Skipping "cloud-node-lifecycle"
I0805 16:16:16.875383 56975 pv_protection_controller.go:79] Starting PV protection controller
I0805 16:16:16.875393 56975 shared_informer.go:255] Waiting for caches to sync for PV protection
W0805 16:16:16.875761 56975 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0805 16:16:16.875786 56975 controllermanager.go:602] Started "persistentvolume-binder"
I0805 16:16:16.876016 56975 controllermanager.go:602] Started "persistentvolume-expander"
... skipping 9 lines ...
I0805 16:16:16.879785 56975 job_controller.go:196] Starting job controller
I0805 16:16:16.879795 56975 shared_informer.go:255] Waiting for caches to sync for job
I0805 16:16:16.880375 56975 node_lifecycle_controller.go:497] Controller will reconcile labels.
I0805 16:16:16.880386 56975 stateful_set.go:152] Starting stateful set controller
I0805 16:16:16.880402 56975 shared_informer.go:255] Waiting for caches to sync for stateful set
I0805 16:16:16.880409 56975 controllermanager.go:602] Started "nodelifecycle"
E0805 16:16:16.881004 56975 core.go:90] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0805 16:16:16.881029 56975 controllermanager.go:580] Skipping "service"
I0805 16:16:16.881349 56975 controllermanager.go:602] Started "podgc"
I0805 16:16:16.882235 56975 gc_controller.go:99] Starting GC controller
I0805 16:16:16.882249 56975 shared_informer.go:255] Waiting for caches to sync for GC
I0805 16:16:16.885027 56975 controllermanager.go:602] Started "horizontalpodautoscaling"
I0805 16:16:16.885088 56975 horizontal.go:168] Starting HPA controller
... skipping 90 lines ...
I0805 16:16:17.191127 56975 shared_informer.go:262] Caches are synced for ephemeral
I0805 16:16:17.274703 56975 shared_informer.go:262] Caches are synced for resource quota
I0805 16:16:17.275939 56975 shared_informer.go:262] Caches are synced for disruption
I0805 16:16:17.280139 56975 shared_informer.go:262] Caches are synced for ReplicationController
I0805 16:16:17.291931 56975 shared_informer.go:262] Caches are synced for resource quota
node/127.0.0.1 created
W0805 16:16:17.550421 56975 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="127.0.0.1" does not exist
+++ [0805 16:16:17] Checking kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"25+", GitVersion:"v1.25.0-beta.0.9+1ddbf2f222c044", GitCommit:"1ddbf2f222c0440bbe7f2c1b43f696cf2a72006e", GitTreeState:"clean", BuildDate:"2022-08-05T00:25:47Z", GoVersion:"go1.19", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.7
Server Version: version.Info{Major:"1", Minor:"25+", GitVersion:"v1.25.0-beta.0.9+1ddbf2f222c044", GitCommit:"1ddbf2f222c0440bbe7f2c1b43f696cf2a72006e", GitTreeState:"clean", BuildDate:"2022-08-05T00:25:47Z", GoVersion:"go1.19", Compiler:"gc", Platform:"linux/amd64"}
I0805 16:16:17.612642 56975 shared_informer.go:262] Caches are synced for garbage collector
I0805 16:16:17.674822 56975 shared_informer.go:262] Caches are synced for garbage collector
I0805 16:16:17.674855 56975 garbagecollector.go:163] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocate IP 10.0.0.1: provided IP is already allocated
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   36s
Recording: run_kubectl_version_tests
Running command: run_kubectl_version_tests

+++ Running case: test-cmd.run_kubectl_version_tests
... skipping 196 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_RESTMapper_evaluation_tests
+++ [0805 16:16:22] Creating namespace namespace-1659716182-7034
namespace/namespace-1659716182-7034 created
Context "test" modified.
+++ [0805 16:16:22] Testing RESTMapper
+++ [0805 16:16:22] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
+++ exit code: 0
NAME                SHORTNAMES   APIVERSION   NAMESPACED   KIND
bindings                         v1           true         Binding
componentstatuses   cs           v1           false        ComponentStatus
configmaps          cm           v1           true         ConfigMap
endpoints           ep           v1           true         Endpoints
... skipping 60 lines ...
namespace/namespace-1659716185-26881 created
Context "test" modified.
+++ [0805 16:16:25] Testing clusterroles
rbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin
rbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run)
clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created
rbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "pod-admin" deleted
... skipping 18 lines ...
clusterrole.rbac.authorization.k8s.io/url-reader created
rbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get:
rbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*:
clusterrole.rbac.authorization.k8s.io/aggregation-reader created
rbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created
rbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run)
rbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
... skipping 64 lines ...
rbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user:
rbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user:
rbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user:
rolebinding.rbac.authorization.k8s.io/admin created (dry run)
rolebinding.rbac.authorization.k8s.io/admin created (server dry run)
Successful
message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found
has: not found
rolebinding.rbac.authorization.k8s.io/admin created
rbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole
rbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:
rolebinding.rbac.authorization.k8s.io/admin subjects updated
rbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo:
... skipping 152 lines ...
namespace/namespace-1659716191-27089 created
Context "test" modified.
+++ [0805 16:16:31] Testing role
role.rbac.authorization.k8s.io/pod-admin created (dry run)
role.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found
has: not found
role.rbac.authorization.k8s.io/pod-admin created
rbac.sh:159: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
rbac.sh:160: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods:
rbac.sh:161: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: :
Successful
... skipping 439 lines ...
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
core.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: resource(s) were provided, but no name was specified
core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: setting 'all' parameter but found a non empty selector.
core.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:210: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:214: Successful get pods -l'name in (valid-pod)' {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:219: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:: :
... skipping 30 lines ...
I0805 16:16:41.515851 61763 round_trippers.go:553] GET https://127.0.0.1:6443/apis/policy/v1/namespaces/test-kubectl-describe-pod/poddisruptionbudgets/test-pdb-2 200 OK in 1 milliseconds
I0805 16:16:41.517506 61763 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-kubectl-describe-pod/events?fieldSelector=involvedObject.name%3Dtest-pdb-2%2CinvolvedObject.namespace%3Dtest-kubectl-describe-pod%2CinvolvedObject.kind%3DPodDisruptionBudget%2CinvolvedObject.uid%3D3112c438-b27a-4b0f-a875-adaf0bc508ab&limit=500 200 OK in 1 milliseconds
poddisruptionbudget.policy/test-pdb-3 created
core.sh:271: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
poddisruptionbudget.policy/test-pdb-4 created
core.sh:275: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
error: min-available and max-unavailable cannot be both specified
core.sh:281: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}:
pod/env-test-pod created
matched TEST_CMD_1
matched <set to the key 'key-1' in secret 'test-secret'>
matched TEST_CMD_2
matched <set to the key 'key-2' of config map 'test-configmap'>
... skipping 242 lines ...
core.sh:542: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:3.8:
Successful
message:kubectl-create kubectl-patch
has:kubectl-patch
pod/valid-pod patched
core.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
+++ [0805 16:16:56] "kubectl patch with resourceVersion 582" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
pod "valid-pod" deleted
pod/valid-pod replaced
core.sh:586: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
Successful
message:kubectl-replace
has:kubectl-replace
Successful
message:error: --grace-period must have --force specified
has:\-\-grace-period must have \-\-force specified
Successful
message:error: --timeout must have --force specified
has:\-\-timeout must have \-\-force specified
node/node-v1-test created
W0805 16:16:57.258654 56975 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="node-v1-test" does not exist
core.sh:614: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced (server dry run)
node/node-v1-test replaced (dry run)
core.sh:639: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced
core.sh:655: Successful get node node-v1-test {{.metadata.annotations.a}}: b
... skipping 29 lines ...
spec:
  containers:
  - image: registry.k8s.io/pause:3.8
    name: kubernetes-pause
has:localonlyvalue
core.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
error: 'name' already has a value (valid-pod), and --overwrite is false
core.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
core.sh:699: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
pod/valid-pod labeled
core.sh:703: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
core.sh:707: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
... skipping 84 lines ...
+++ Running case: test-cmd.run_kubectl_create_error_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_create_error_tests
+++ [0805 16:17:05] Creating namespace namespace-1659716225-181
namespace/namespace-1659716225-181 created
Context "test" modified.
+++ [0805 16:17:05] Testing kubectl create with error
Error: must specify one of -f and -k

Create a resource from a file or from stdin.

 JSON and YAML formats are accepted.

Examples:
... skipping 63 lines ...
	If true, keep the managedFields when printing objects in JSON or YAML format.

	--template='':
	Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].

	--validate='strict':
	Must be one of: strict (or true), warn, ignore (or false). "true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not. "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise. "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.

	--windows-line-endings=false:
	Only relevant if --edit=true. Defaults to the line ending native to your platform.

Usage:
  kubectl create -f FILENAME [options]

... skipping 38 lines ...
I0805 16:17:08.282947 56975 event.go:294] "Event occurred" object="namespace-1659716226-234/test-deployment-retainkeys-586ff4b6c7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-586ff4b6c7-k7w2t"
deployment.apps "test-deployment-retainkeys" deleted
apply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
apply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
apply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0805 16:17:09.143260 65465 helpers.go:653] --dry-run=true is deprecated (boolean value) and can be replaced with --dry-run=client.
pod/test-pod created (dry run)
pod/test-pod created (dry run)
... skipping 29 lines ...
pod/b created
apply.sh:208: Successful get pods a {{.metadata.name}}: a
apply.sh:209: Successful get pods b -n nsb {{.metadata.name}}: b
pod "a" deleted
pod "b" deleted
Successful
message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector
has:all resources selected for prune without explicitly passing --all
pod/a created
pod/b created
I0805 16:17:17.238518 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716226-234/prune-svc" clusterIPs=map[IPv4:10.0.0.146]
service/prune-svc created
I0805 16:17:20.235498 56975 horizontal.go:360] Horizontal Pod Autoscaler frontend has been deleted in namespace-1659716223-26188
... skipping 37 lines ...
apply.sh:262: Successful get pods b -n nsb {{.metadata.name}}: b
pod/b unchanged
pod/a pruned
apply.sh:266: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b:
namespace "nsb" deleted
Successful
message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation.
has:the namespace from the provided object "nsb" does not match the namespace "foo".
apply.sh:277: Successful get services {{range.items}}{{.metadata.name}}:{{end}}:
service/a created
apply.sh:281: Successful get services a {{.metadata.name}}: a
Successful
message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set
... skipping 28 lines ...
apply.sh:303: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment
apply.sh:304: Successful get service test-the-service {{.metadata.name}}: test-the-service
configmap "test-the-map" deleted
service "test-the-service" deleted
deployment.apps "test-the-deployment" deleted
Successful
message:Error from server (NotFound): namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
apply.sh:312: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:namespace/multi-resource-ns created
Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
Successful
message:Error from server (NotFound): pods "test-pod" not found
has:pods "test-pod" not found
pod/test-pod created
namespace/multi-resource-ns unchanged
apply.sh:320: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod
pod "test-pod" deleted
namespace "multi-resource-ns" deleted
I0805 16:17:46.126040 56975 namespace_controller.go:185] Namespace has been deleted nsb
apply.sh:326: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:configmap/foo created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Bogus" in version "example.com/v1"
apply.sh:332: Successful get configmaps foo {{.metadata.name}}: foo
configmap "foo" deleted
apply.sh:338: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
... skipping 6 lines ...
pod "pod-a" deleted
pod "pod-c" deleted
apply.sh:346: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apply.sh:350: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:customresourcedefinition.apiextensions.k8s.io/widgets.example.com created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Widget" in version "example.com/v1"
Successful
message:Error from server (NotFound): widgets.example.com "foo" not found
has:widgets.example.com "foo" not found
apply.sh:356: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com
I0805 16:17:52.341719 53408 controller.go:616] quota admission added evaluator for: widgets.example.com
widget.example.com/foo created
customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged
apply.sh:359: Successful get widget foo {{.metadata.name}}: foo
... skipping 32 lines ...
message:852
has:852
pod "test-pod" deleted
apply.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
+++ [0805 16:17:55] Testing upgrade kubectl client-side apply to server-side apply
pod/test-pod created
error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name
Please review the fields above--they currently have other managers.
Here are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply command with the `--force-conflicts` flag.
* If you do not intend to manage all of the fields, please edit your manifest to remove references to the fields that should keep their
... skipping 75 lines ...
pod "nginx-extensions" deleted
Successful
message:pod/test1 created
has:pod/test1 created
pod "test1" deleted
Successful
message:error: Invalid image name "InvalidImageName": invalid reference format
has:error: Invalid image name "InvalidImageName": invalid reference format
+++ exit code: 0
Recording: run_kubectl_create_filter_tests
Running command: run_kubectl_create_filter_tests

+++ Running case: test-cmd.run_kubectl_create_filter_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 3 lines ...
Context "test" modified.
+++ [0805 16:17:58] Testing kubectl create filter
create.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
create.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_apply_deployments_tests
Running command: run_kubectl_apply_deployments_tests
... skipping 29 lines ...
I0805 16:18:00.543308 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-59959c88b4 to 3"
I0805 16:18:00.546726 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-59959c88b4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-59959c88b4-cz9ln"
I0805 16:18:00.550008 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-59959c88b4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-59959c88b4-gcpr7"
I0805 16:18:00.550613 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-59959c88b4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-59959c88b4-fl9tw"
apps.sh:154: Successful get deployment nginx {{.metadata.name}}: nginx
Successful
message:Error from server (Conflict): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1659716278-22970\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"registry.k8s.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
to:
Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
Name: "nginx", Namespace: "namespace-1659716278-22970"
for: "hack/testdata/deployment-label-change2.yaml": error when patching "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
has:Error from server (Conflict)
deployment.apps/nginx configured
I0805 16:18:09.019433 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-89c976f65 to 3"
I0805 16:18:09.022903 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-89c976f65" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-89c976f65-hsdj4"
I0805 16:18:09.028933 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-89c976f65" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-89c976f65-6btl4"
I0805 16:18:09.028965 56975 event.go:294] "Event occurred" object="namespace-1659716278-22970/nginx-89c976f65" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-89c976f65-kgwf6"
Successful
... skipping 368 lines ...
+++ [0805 16:18:21] Creating namespace namespace-1659716301-4457
namespace/namespace-1659716301-4457 created
Context "test" modified.
+++ [0805 16:18:21] Testing kubectl get
get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:{
    "apiVersion": "v1",
    "items": [],
... skipping 21 lines ...
has not:No resources found
Successful
message:NAME
has not:No resources found
get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:error: the server doesn't have a resource type "foobar"
has not:No resources found
Successful
message:No resources found in namespace-1659716301-4457 namespace.
has:No resources found
Successful
message:
has not:No resources found
Successful
message:No resources found in namespace-1659716301-4457 namespace.
has:No resources found
get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:Error from server (NotFound): pods "abc" not found
has not:List
Successful
message:I0805 16:18:22.556491 68993 loader.go:374] Config loaded from file: /tmp/tmp.LiXfhpA4H4/.kube/config
I0805 16:18:22.563424 68993 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 6 milliseconds
I0805 16:18:22.579602 68993 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 1 milliseconds
I0805 16:18:22.581044 68993 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds
... skipping 596 lines ...
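The truncated conflict message earlier in this run ("Apply failed with 1 conflict: conflict with \"kubectl-client-side-apply\"...") names `--force-conflicts` as the way to take over fields owned by another field manager. A minimal sketch of that resolution path, assuming the same manifest that produced the conflict; the file name is illustrative, not taken from the recorded run:

```bash
# Re-run the apply server-side and forcibly take ownership of the
# conflicting fields from the "kubectl-client-side-apply" manager.
kubectl apply --server-side --force-conflicts -f pod.yaml
```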
}
get.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
<no value>Successful
message:valid-pod:
has:valid-pod:
Successful
message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
	template was:
		{.missing}
	object given to jsonpath engine was:
		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2022-08-05T16:18:30Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2022-08-05T16:18:30Z"}}, "name":"valid-pod", "namespace":"namespace-1659716309-12508", "resourceVersion":"1028", "uid":"d7a964a0-b388-4e7f-8f21-3fc87091e2d3"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"registry.k8s.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
has:missing is not found
error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
Successful
message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing".
Printing more information for debugging the template:
	template was:
		{{.missing}}
	raw data was:
		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2022-08-05T16:18:30Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2022-08-05T16:18:30Z"}],"name":"valid-pod","namespace":"namespace-1659716309-12508","resourceVersion":"1028","uid":"d7a964a0-b388-4e7f-8f21-3fc87091e2d3"},"spec":{"containers":[{"image":"registry.k8s.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
	object given to template engine was:
		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2022-08-05T16:18:30Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2022-08-05T16:18:30Z]] name:valid-pod namespace:namespace-1659716309-12508 resourceVersion:1028 uid:d7a964a0-b388-4e7f-8f21-3fc87091e2d3] spec:map[containers:[map[image:registry.k8s.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
has:map has no entry for key "missing"
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
Successful
message:Error from server (NotFound): the server could not find the requested resource
has:the server could not find the requested resource
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:STATUS
Successful
... skipping 78 lines ...
  terminationGracePeriodSeconds: 30
status:
  phase: Pending
  qosClass: Guaranteed
has:name: valid-pod
Successful
message:Error from server (NotFound): pods "invalid-pod" not found
has:"invalid-pod" not found
pod "valid-pod" deleted
get.sh:204: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/redis-master created
pod/valid-pod created
Successful
... skipping 245 lines ...
+++ [0805 16:18:42] Creating namespace namespace-1659716322-12975
namespace/namespace-1659716322-12975 created
Context "test" modified.
+++ [0805 16:18:42] Testing kubectl exec POD COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
pod/test-pod created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pods "test-pod" not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod or type/name must be specified
pod "test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_exec_resource_name_tests
Running command: run_kubectl_exec_resource_name_tests

+++ Running case: test-cmd.run_kubectl_exec_resource_name_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_exec_resource_name_tests
... skipping 3 lines ...
+++ [0805 16:18:43] Creating namespace namespace-1659716323-23405
namespace/namespace-1659716323-23405 created
Context "test" modified.
+++ [0805 16:18:43] Testing kubectl exec TYPE/NAME COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: the server doesn't have a resource type "foo"
has:error:
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): deployments.apps "bar" not found
has:"bar" not found
pod/test-pod created
replicaset.apps/frontend created
I0805 16:18:43.843452 56975 event.go:294] "Event occurred" object="namespace-1659716323-23405/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-ks9nb"
I0805 16:18:43.846845 56975 event.go:294] "Event occurred" object="namespace-1659716323-23405/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-g6flq"
I0805 16:18:43.846974 56975 event.go:294] "Event occurred" object="namespace-1659716323-23405/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-hsf8d"
configmap/test-set-env-config created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
has:not implemented
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod, type/name or --filename must be specified
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-g6flq does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-g6flq does not have a host assigned
has not:pod, type/name or --filename must be specified
pod "test-pod" deleted
replicaset.apps "frontend" deleted
configmap "test-set-env-config" deleted
+++ exit code: 0
Recording: run_create_secret_tests
Running command: run_create_secret_tests

+++ Running case: test-cmd.run_create_secret_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_create_secret_tests
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
message:user-specified
has:user-specified
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"6bb448b6-53fe-4bc2-abf4-53d6f9e1ffb4","resourceVersion":"1125","creationTimestamp":"2022-08-05T16:18:44Z"}}
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"6bb448b6-53fe-4bc2-abf4-53d6f9e1ffb4","resourceVersion":"1126","creationTimestamp":"2022-08-05T16:18:44Z"},"data":{"key1":"config1"}}
has:uid
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"6bb448b6-53fe-4bc2-abf4-53d6f9e1ffb4","resourceVersion":"1126","creationTimestamp":"2022-08-05T16:18:44Z"},"data":{"key1":"config1"}}
has:config1
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"6bb448b6-53fe-4bc2-abf4-53d6f9e1ffb4"}}
Successful
message:Error from server (NotFound): configmaps "tester-update-cm" not found
has:configmaps "tester-update-cm" not found
+++ exit code: 0
Recording: run_kubectl_create_kustomization_directory_tests
Running command: run_kubectl_create_kustomization_directory_tests

+++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests
... skipping 25 lines ...
+++ command: run_kubectl_create_validate_tests
+++ [0805 16:18:45] Creating namespace namespace-1659716325-20506
namespace/namespace-1659716325-20506 created
Context "test" modified.
+++ [0805 16:18:46] Testing kubectl create --validate
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0805 16:18:46] Testing kubectl create --validate=true
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0805 16:18:46] Testing kubectl create --validate=false
I0805 16:18:46.402427 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-7fb96c846b to 4"
Successful
message:deployment.apps/invalid-nginx-deployment created
has:deployment.apps/invalid-nginx-deployment created
I0805 16:18:46.407192 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-qwvx7"
I0805 16:18:46.410496 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-qcpfn"
I0805 16:18:46.410739 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-2xrqn"
I0805 16:18:46.414644 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-d89c4"
deployment.apps "invalid-nginx-deployment" deleted
+++ [0805 16:18:46] Testing kubectl create --validate=strict
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0805 16:18:46] Testing kubectl create --validate=warn
Warning: unknown field "spec.baz"
Warning: unknown field "spec.foo"
I0805 16:18:46.799863 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-7fb96c846b to 4"
Successful
message:deployment.apps/invalid-nginx-deployment created
... skipping 11 lines ...
I0805 16:18:46.945583 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-rvps9"
I0805 16:18:46.948884 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-kcgtx"
I0805 16:18:46.948919 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-twsx5"
I0805 16:18:46.953033 56975 event.go:294] "Event occurred" object="namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-7fb96c846b-rvvsn"
deployment.apps "invalid-nginx-deployment" deleted
+++ [0805 16:18:47] Testing kubectl create
E0805 16:18:47.055060 56975 replica_set.go:550] sync "namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b" failed with Operation cannot be fulfilled on replicasets.apps "invalid-nginx-deployment-7fb96c846b": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1659716325-20506/invalid-nginx-deployment-7fb96c846b, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: bafc8693-d589-4d7a-a68e-857e641cddb5, UID in object meta:
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0805 16:18:47] Testing kubectl create --validate=foo
Successful
message:error: invalid - validate option "foo"; must be one of: strict (or true), warn, ignore (or false)
has:invalid - validate option "foo"
+++ exit code: 0
Recording: run_convert_tests
Running command: run_convert_tests

+++ Running case: test-cmd.run_convert_tests
... skipping 51 lines ...
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
has:apps/v1beta1
deployment.apps "nginx" deleted
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
Successful
message:nginx:
has:nginx:
+++ exit code: 0
Recording: run_kubectl_delete_allnamespaces_tests
... skipping 103 lines ...
has:Timeout
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
Successful
message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
has:Invalid timeout value
pod "valid-pod" deleted
+++ exit code: 0
Recording: run_crd_tests
Running command: run_crd_tests
... skipping 149 lines ...
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:296: Successful get foos/test {{.patched}}: value2
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:298: Successful get foos/test {{.patched}}: <no value>
+++ [0805 16:18:58] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge
{
    "apiVersion": "company.com/v1",
    "kind": "Foo",
    "metadata": {
        "annotations": {
            "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 324 lines ...
crd.sh:519: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}:
namespace/non-native-resources created
bar.company.com/test created
crd.sh:524: Successful get bars {{len .items}}: 1
namespace "non-native-resources" deleted
crd.sh:527: Successful get bars {{len .items}}: 0
Error from server (NotFound): namespaces "non-native-resources" not found
customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
customresourcedefinition.apiextensions.k8s.io "resources.mygroup.example.com" deleted
customresourcedefinition.apiextensions.k8s.io "validfoos.company.com" deleted
+++ exit code: 0
Recording: run_recursive_resources_tests
... skipping 5 lines ...
+++ [0805 16:19:28] Testing recursive resources
+++ [0805 16:19:28] Creating namespace namespace-1659716368-5751
namespace/namespace-1659716368-5751 created
Context "test" modified.
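The generic-resources run that follows drives kubectl's --recursive (-R) flag over a fixture tree in which one manifest (busybox-broken.yaml) deliberately omits its kind. A minimal sketch of the invocation, with paths taken from the log:

  # valid manifests are processed, the broken one is reported, and the command
  # still exits non-zero so the harness can assert on the aggregated error
  kubectl create -f hack/testdata/recursive/pod --recursive
  kubectl get pods -o go-template='{{range.items}}{{.metadata.name}}:{{end}}'   # busybox0:busybox1: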
generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0805 16:19:29.198499 53408 cacher.go:155] Terminating all watchers from cacher *unstructured.Unstructured
E0805 16:19:29.199788 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0805 16:19:29.360056 53408 cacher.go:155] Terminating all watchers from cacher *unstructured.Unstructured
E0805 16:19:29.361376 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0805 16:19:29.528393 53408 cacher.go:155] Terminating all watchers from cacher *unstructured.Unstructured
E0805 16:19:29.529778 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0805 16:19:29.700692 53408 cacher.go:155] Terminating all watchers from cacher *unstructured.Unstructured
E0805 16:19:29.701913 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:pod/busybox0 created
pod/busybox1 created
error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0805 16:19:30.263514 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:30.263555 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:pod/busybox0 replaced
pod/busybox1 replaced
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
W0805 16:19:30.290746 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:30.290786 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:Name:         busybox0
Namespace:    namespace-1659716368-5751
Priority:     0
Node:         <none>
... skipping 159 lines ...
has:Object 'Kind' is missing
generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
Successful
message:pod/busybox0 annotated
pod/busybox1 annotated
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0805 16:19:30.694311 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:30.694348 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox0 configured
Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox1 configured
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0805 16:19:31.162559 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:31.162599 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
generic-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
Successful
message:pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
generic-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
Successful
message:pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "busybox0" force deleted
pod "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
replicationcontroller/busybox0 created
I0805 16:19:31.927722 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-m6n47"
replicationcontroller/busybox1 created
error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0805 16:19:31.934693 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-qjgks"
generic-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1
generic-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
generic-resources.sh:319: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
Successful
message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
horizontalpodautoscaler.autoscaling/busybox1 autoscaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
horizontalpodautoscaler.autoscaling "busybox0" deleted
horizontalpodautoscaler.autoscaling "busybox1" deleted
generic-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1
W0805 16:19:32.801501 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:32.801545 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0805 16:19:32.811904 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716368-5751/busybox0" clusterIPs=map[IPv4:10.0.0.153]
I0805 16:19:32.818505 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716368-5751/busybox1" clusterIPs=map[IPv4:10.0.0.118]
generic-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
generic-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
Successful
message:service/busybox0 exposed
service/busybox1 exposed
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
I0805 16:19:32.983802 56975 namespace_controller.go:185] Namespace has been deleted non-native-resources
W0805 16:19:33.015262 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:33.015324 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1
I0805 16:19:33.224253 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-vvz9r"
I0805 16:19:33.232195 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-6vlcc"
generic-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2
generic-resources.sh:347: Successful get rc busybox1 {{.spec.replicas}}: 2
Successful
message:replicationcontroller/busybox0 scaled
replicationcontroller/busybox1 scaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
W0805 16:19:33.405839 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:33.405882 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
deployment.apps/nginx1-deployment created
deployment.apps/nginx0-deployment created
error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0805 16:19:33.804813 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx1-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-7dcf5745b8 to 2"
I0805 16:19:33.808256 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx0-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-cfdcb9657 to 2"
I0805 16:19:33.808539 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx1-deployment-7dcf5745b8" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-7dcf5745b8-2r88z"
I0805 16:19:33.812351 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx0-deployment-cfdcb9657" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-cfdcb9657-sqdqb"
I0805 16:19:33.812387 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx1-deployment-7dcf5745b8" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-7dcf5745b8-cmcsm"
I0805 16:19:33.817539 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/nginx0-deployment-cfdcb9657" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-cfdcb9657-2lqpb"
generic-resources.sh:365: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
generic-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
generic-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
Successful
message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
deployment.apps/nginx1-deployment paused
deployment.apps/nginx0-deployment paused
generic-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
W0805 16:19:34.316169 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:34.316205 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps/nginx1-deployment resumed
deployment.apps/nginx0-deployment resumed
generic-resources.sh:384: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: <no value>:<no value>:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
... skipping 4 lines ...
has:Waiting for deployment "nginx1-deployment" rollout to finish [32mSuccessful (B[mmessage:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available... timed out waiting for the condition unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' has:Object 'Kind' is missing W0805 16:19:37.129669 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:37.129702 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mSuccessful (B[mmessage:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available... Waiting for deployment "nginx0-deployment" rollout to finish: 0 of 2 updated replicas are available... timed out waiting for the condition timed out waiting for the condition unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' ... skipping 18 lines ... 
1         <none>

deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx0-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx1-deployment
Successful
message:deployment.apps/nginx1-deployment
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' W0805 16:19:38.333621 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:38.333657 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mgeneric-resources.sh:411: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mW0805 16:19:38.929012 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:38.929060 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource replicationcontroller/busybox0 created I0805 16:19:38.952920 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-z6xkj" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0805 16:19:38.959736 56975 event.go:294] "Event occurred" object="namespace-1659716368-5751/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-snsbn" [32mgeneric-resources.sh:415: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
message:no rollbacker has been implemented for "ReplicationController"
no rollbacker has been implemented for "ReplicationController"
unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox0" pausing is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" pausing is not supported
error: replicationcontrollers "busybox1" pausing is not supported
has:replicationcontrollers "busybox1" pausing is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:Object 'Kind' is missing
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox0" resuming is not supported
Successful
message:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
error: replicationcontrollers "busybox0" resuming is not supported
error: replicationcontrollers "busybox1" resuming is not supported
has:replicationcontrollers "busybox1" resuming is not supported
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
W0805 16:19:39.792931 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:39.792967 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_namespace_tests
Running command: run_namespace_tests
+++ Running case: test-cmd.run_namespace_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_namespace_tests
+++ [0805 16:19:40] Testing kubectl(v1:namespaces)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created (dry run)
namespace/my-namespace created (server dry run)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1471: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
query for namespaces had limit param
query for resourcequotas had limit param
query for limitranges had limit param
... skipping 132 lines ...
I0805 16:19:41.057323 74707 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1659716368-5751/resourcequotas?limit=500 200 OK in 0 milliseconds
I0805 16:19:41.058357 74707 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1659716368-5751/limitranges?limit=500 200 OK in 0 milliseconds
I0805 16:19:41.059804 74707 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/nsb 200 OK in 1 milliseconds
I0805 16:19:41.060990 74707 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/nsb/resourcequotas?limit=500 200 OK in 1 milliseconds
I0805 16:19:41.062153 74707 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/nsb/limitranges?limit=500 200 OK in 1 milliseconds
namespace "my-namespace" deleted
W0805 16:19:45.056427 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:19:45.056465 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace/my-namespace condition met
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1482: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
... skipping 34 lines ...
namespace "namespace-1659716328-20342" deleted
namespace "namespace-1659716329-4052" deleted
namespace "namespace-1659716331-2582" deleted
namespace "namespace-1659716332-11381" deleted
namespace "namespace-1659716368-5751" deleted
namespace "nsb" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:Warning: deleting cluster-scoped resources
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
namespace "my-namespace" deleted
namespace "namespace-1659716179-11109" deleted
... skipping 32 lines ...
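The Forbidden errors above are the API server protecting its system namespaces while a bulk deletion removes everything else. A hedged sketch (assuming the harness runs a bulk delete along these lines):

  kubectl delete namespaces --all   # user namespaces go; "default", "kube-public" and "kube-system" are forbidden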
namespace "namespace-1659716328-20342" deleted namespace "namespace-1659716329-4052" deleted namespace "namespace-1659716331-2582" deleted namespace "namespace-1659716332-11381" deleted namespace "namespace-1659716368-5751" deleted namespace "nsb" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted namespace/quotas created W0805 16:19:46.932809 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:46.932850 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1489: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1490: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created (dry run) resourcequota/test-quota created (server dry run) [32mcore.sh:1494: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name \"test-quota\" }}found{{end}}{{end}}:: : (B[mI0805 16:19:47.277060 56975 horizontal.go:360] Horizontal Pod Autoscaler busybox0 has been deleted in namespace-1659716368-5751 ... skipping 11 lines ... I0805 16:19:47.449089 74907 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/quotas/resourcequotas/test-quota 200 OK in 1 milliseconds (B[mresourcequota "test-quota" deleted I0805 16:19:47.572357 56975 resource_quota_controller.go:315] Resource quota has been deleted quotas/test-quota namespace "quotas" deleted I0805 16:19:47.657054 56975 shared_informer.go:255] Waiting for caches to sync for garbage collector I0805 16:19:47.657107 56975 shared_informer.go:262] Caches are synced for garbage collector W0805 16:19:49.461850 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:49.461885 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource W0805 16:19:51.384917 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:19:51.384950 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1511: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"other\" }}found{{end}}{{end}}:: : (B[mnamespace/other created [32mcore.sh:1515: Successful get namespaces/other {{.metadata.name}}: other (B[m[32mcore.sh:1519: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/valid-pod created [32mcore.sh:1523: Successful get pods --namespace=other 
{{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mcore.sh:1525: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mSuccessful (B[mmessage:error: a resource cannot be retrieved by name across all namespaces has:a resource cannot be retrieved by name across all namespaces [32mcore.sh:1532: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:1536: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace "other" deleted ... skipping 126 lines ... (B[m[32mcore.sh:920: Successful get secret/secret-string-data --namespace=test-secrets {{.data}}: map[k1:djE= k2:djI=] (B[m[32mcore.sh:921: Successful get secret/secret-string-data --namespace=test-secrets {{.stringData}}: <no value> (B[msecret "secret-string-data" deleted [32mcore.sh:930: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}: (B[msecret "test-secret" deleted namespace "test-secrets" deleted W0805 16:20:01.666291 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:20:01.666332 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0805 16:20:03.676682 56975 namespace_controller.go:185] Namespace has been deleted other +++ exit code: 0 W0805 16:20:06.753162 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:20:06.753196 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource Recording: run_configmap_tests Running command: run_configmap_tests +++ Running case: test-cmd.run_configmap_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_configmap_tests +++ [0805 16:20:06] Creating namespace namespace-1659716406-3497 namespace/namespace-1659716406-3497 created Context "test" modified. +++ [0805 16:20:06] Testing configmaps W0805 16:20:07.023712 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:20:07.023757 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource configmap/test-configmap created [32mcore.sh:28: Successful get configmap/test-configmap {{.metadata.name}}: test-configmap (B[mconfigmap "test-configmap" deleted [32mcore.sh:33: Successful get namespaces {{range.items}}{{ if eq .metadata.name \"test-configmaps\" }}found{{end}}{{end}}:: : (B[mnamespace/test-configmaps created [32mcore.sh:37: Successful get namespaces/test-configmaps {{.metadata.name}}: test-configmaps ... skipping 16 lines ... 
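The "cannot be retrieved by name" error above is kubectl refusing a by-name get across namespaces, since names are only unique within a single namespace. A sketch with the names from the log:

  kubectl get pods valid-pod --all-namespaces    # error: a resource cannot be retrieved by name across all namespaces
  kubectl get pods valid-pod --namespace=other   # fine: the lookup is scoped to one namespace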
I0805 16:20:08.215855 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/configmaps/kube-root-ca.crt 200 OK in 1 milliseconds
I0805 16:20:08.217477 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/events?fieldSelector=involvedObject.namespace%3Dtest-configmaps%2CinvolvedObject.kind%3DConfigMap%2CinvolvedObject.uid%3Dc1846456-08be-4b11-bf9c-8ce1ee680487%2CinvolvedObject.name%3Dkube-root-ca.crt&limit=500 200 OK in 1 milliseconds
I0805 16:20:08.219878 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/configmaps/test-binary-configmap 200 OK in 1 milliseconds
I0805 16:20:08.221300 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/events?fieldSelector=involvedObject.name%3Dtest-binary-configmap%2CinvolvedObject.namespace%3Dtest-configmaps%2CinvolvedObject.kind%3DConfigMap%2CinvolvedObject.uid%3De0360b31-0c94-493b-a345-ddbd906d4a46&limit=500 200 OK in 1 milliseconds
I0805 16:20:08.222888 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/configmaps/test-configmap 200 OK in 1 milliseconds
I0805 16:20:08.224234 76079 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-configmaps/events?fieldSelector=involvedObject.uid%3Db6f5f35d-51be-4392-89c1-a2418fe3281c%2CinvolvedObject.name%3Dtest-configmap%2CinvolvedObject.namespace%3Dtest-configmaps%2CinvolvedObject.kind%3DConfigMap&limit=500 200 OK in 1 milliseconds
W0805 16:20:08.356193 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:20:08.356239 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
configmap "test-configmap" deleted
configmap "test-binary-configmap" deleted
namespace "test-configmaps" deleted
I0805 16:20:11.724783 56975 namespace_controller.go:185] Namespace has been deleted test-secrets
+++ exit code: 0
Recording: run_client_config_tests
... skipping 4 lines ...
+++ command: run_client_config_tests
+++ [0805 16:20:13] Creating namespace namespace-1659716413-8655
namespace/namespace-1659716413-8655 created
Context "test" modified.
+++ [0805 16:20:13] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 57 lines ...
Labels:                        <none>
Annotations:                   <none>
Schedule:                      59 23 31 2 *
Concurrency Policy:            Allow
Suspend:                       False
Successful Job History Limit:  3
Failed Job History Limit:      1
Starting Deadline Seconds:     <unset>
Selector:                      <unset>
Parallelism:                   <unset>
Completions:                   <unset>
Pod Template:
  Labels:  <none>
... skipping 55 lines ...
Annotations:      batch.kubernetes.io/job-tracking:
                  cronjob.kubernetes.io/instantiate: manual
Parallelism:      1
Completions:      1
Completion Mode:  NonIndexed
Start Time:       Fri, 05 Aug 2022 16:20:22 +0000
Pods Statuses:    1 Active (0 Ready) / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  controller-uid=1699708e-97e7-449b-9ae3-4a8a70d0435a
           job-name=test-job
  Containers:
   pi:
    Image:      registry.k8s.io/perl
... skipping 92 lines ...
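Each failure above maps to one client-config override. A hedged sketch of commands that reproduce the same messages (the exact invocations in the suite may differ):

  kubectl get pods --kubeconfig=missing        # error: stat missing: no such file or directory
  kubectl get pods --context=missing-context   # context was not found for specified context: missing-context
  kubectl get pods --cluster=missing-cluster   # error: no server found for cluster "missing-cluster"
  kubectl get pods --user=missing-user         # error: auth info "missing-user" does not exist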
I0805 16:20:29.328620 77377 loader.go:374] Config loaded from file: /tmp/tmp.LiXfhpA4H4/.kube/config
I0805 16:20:29.333988 77377 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 4 milliseconds
I0805 16:20:29.340912 77377 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1659716428-31247/podtemplates?limit=500 200 OK in 1 milliseconds
I0805 16:20:29.343311 77377 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1659716428-31247/podtemplates/nginx 200 OK in 1 milliseconds
I0805 16:20:29.344699 77377 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1659716428-31247/events?fieldSelector=involvedObject.namespace%3Dnamespace-1659716428-31247%2CinvolvedObject.kind%3DPodTemplate%2CinvolvedObject.uid%3D6393c4de-1919-4fba-b889-98491a3c53f6%2CinvolvedObject.name%3Dnginx&limit=500 200 OK in 1 milliseconds
podtemplate "nginx" deleted
W0805 16:20:29.525234 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:20:29.525277 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:1616: Successful get podtemplate {{range.items}}{{.metadata.name}}:{{end}}:
+++ exit code: 0
Recording: run_service_tests
Running command: run_service_tests
+++ Running case: test-cmd.run_service_tests
... skipping 360 lines ...
  type: ClusterIP
status:
  loadBalancer: {}
Successful
message:kubectl-create kubectl-set
has:kubectl-set
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
core.sh:1034: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
service/redis-master selector updated
Successful
message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again
has:Conflict
core.sh:1047: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
service "redis-master" deleted
core.sh:1054: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
core.sh:1058: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
I0805 16:20:32.254794 53408 alloc.go:327] "allocated clusterIPs" service="default/redis-master" clusterIPs=map[IPv4:10.0.0.113]
... skipping 25 lines ...
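The kubectl set error above is the guard that --local only operates on file input, never on live cluster objects. A hedged illustration (rsrc.yaml is the hypothetical file name taken from the error message itself):

  kubectl set selector svc/redis-master 'role=padawan' --local -o yaml   # rejected: resources must come from --filename
  kubectl set selector -f rsrc.yaml 'role=padawan' --local -o yaml       # allowed: edits the file's object client-side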
(B[mservice "redis-master" deleted service "redis-slave" deleted [32mcore.sh:1128: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[m[32mcore.sh:1132: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[mservice/beep-boop created (dry run) service/beep-boop created (server dry run) W0805 16:20:34.015064 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0805 16:20:34.015102 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource [32mcore.sh:1136: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: (B[mservice/beep-boop created [32mcore.sh:1140: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes: (B[m[32mcore.sh:1144: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: beep-boop:kubernetes: (B[mservice "beep-boop" deleted [32mcore.sh:1151: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes: ... skipping 105 lines ... (B[m[32mapps.sh:90: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/nginx:test-cmd: (B[m[32mapps.sh:91: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2 (B[mdaemonset.apps/bind rolled back [32mapps.sh:94: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0: (B[m[32mapps.sh:95: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[m[32mSuccessful (B[mmessage:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:99: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0: (B[m[32mapps.sh:100: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mdaemonset.apps/bind rolled back [32mapps.sh:103: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:latest: (B[m[32mapps.sh:104: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/nginx:test-cmd: ... skipping 36 lines ... Namespace: namespace-1659716439-11957 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 17 lines ... Namespace: namespace-1659716439-11957 Selector: app=guestbook,tier=frontend Labels: app=guestbook tier=frontend Annotations: <none> Replicas: 3 current / 3 desired Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed Pod Template: Labels: app=guestbook tier=frontend Containers: php-redis: Image: gcr.io/google_samples/gb-frontend:v4 ... skipping 18 lines ... 
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 12 lines ...
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 27 lines ...
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 11 lines ...
Namespace:    namespace-1659716439-11957
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v4
... skipping 25 lines ...
core.sh:1240: Successful get rc frontend {{.spec.replicas}}: 3
replicationcontroller/frontend scaled
E0805 16:20:41.086882 56975 replica_set.go:224] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1659716439-11957 f8035f96-95af-4cda-aac1-ed5b02348c78 2158 2 2022-08-05 16:20:40 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kubectl Update v1 <nil> FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {kube-controller-manager Update v1 2022-08-05 16:20:40 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kubectl-create Update v1 2022-08-05 16:20:40 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:selector":{},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc0028a7c98 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil>}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}
I0805 16:20:41.091738 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-fkbgz"
core.sh:1244: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1248: Successful get rc frontend {{.spec.replicas}}: 2
error: Expected replicas to be 3, was 2
core.sh:1252: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1256: Successful get rc frontend {{.spec.replicas}}: 2
replicationcontroller/frontend scaled
I0805 16:20:41.491468 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-c8hrl"
core.sh:1260: Successful get rc frontend {{.spec.replicas}}: 3
core.sh:1264: Successful get rc frontend {{.spec.replicas}}: 3
... skipping 32 lines ...
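The error: Expected replicas to be 3, was 2 line above is kubectl's scale precondition firing, not a failure of the scale itself. A sketch of the two forms exercised here (resource names reused from the log):

# Unconditional scale:
kubectl scale rc frontend --replicas=2
# Conditional scale: only proceed if the current size matches --current-replicas;
# otherwise kubectl reports the mismatch and changes nothing:
kubectl scale rc frontend --current-replicas=3 --replicas=3
#   error: Expected replicas to be 3, was 2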
I0805 16:20:42.876293 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716439-11957/expose-test-deployment" clusterIPs=map[IPv4:10.0.0.200]
Successful
message:service/expose-test-deployment exposed
has:service/expose-test-deployment exposed
service "expose-test-deployment" deleted
Successful
message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed
has:invalid deployment: no selectors
deployment.apps/nginx-deployment created
I0805 16:20:43.168019 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6bff6db to 3"
I0805 16:20:43.172245 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-6bff6db" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6bff6db-6cs74"
I0805 16:20:43.175342 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-6bff6db" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6bff6db-4pxps"
I0805 16:20:43.175375 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-6bff6db" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6bff6db-2ghqd"
... skipping 24 lines ...
pod "valid-pod" deleted
service "frontend" deleted
service "frontend-2" deleted
service "frontend-3" deleted
service "frontend-4" deleted
Successful
message:error: cannot expose a Node
has:cannot expose
Successful
message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters
has:metadata.name: Invalid value
I0805 16:20:44.744814 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716439-11957/kubernetes-serve-hostname-testing-sixty-three-characters-in-len" clusterIPs=map[IPv4:10.0.0.124]
Successful
... skipping 32 lines ...
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1403: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 70
horizontalpodautoscaler.autoscaling "frontend" deleted
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1407: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80
horizontalpodautoscaler.autoscaling "frontend" deleted
error: required flag(s) "max" not set
replicationcontroller "frontend" deleted
core.sh:1416: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
... skipping 24 lines ...
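Two behaviors exercised above, sketched with names from the log: kubectl expose copies the target's selector into the new Service, so objects without selectors (or kinds like Node) cannot be exposed, and kubectl autoscale treats --max as mandatory.

# Fails for a deployment with no .spec.selector ("invalid deployment: no selectors"):
kubectl expose deployment nginx-deployment --port=80
# --max is required; omitting it yields the logged 'required flag(s) "max" not set':
kubectl autoscale rc frontend --min=1 --max=2 --cpu-percent=70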
          limits:
            cpu: 300m
          requests:
            cpu: 300m
      terminationGracePeriodSeconds: 0
status: {}
Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found
deployment.apps/nginx-deployment-resources created
I0805 16:20:47.130421 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-576b8799f to 3"
I0805 16:20:47.133863 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-576b8799f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-576b8799f-8f5b6"
I0805 16:20:47.136681 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-576b8799f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-576b8799f-bqdh5"
I0805 16:20:47.137310 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-576b8799f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-576b8799f-cb7bj"
core.sh:1422: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources:
core.sh:1423: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
core.sh:1424: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment-resources resource requirements updated
I0805 16:20:47.415594 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-75b54ff5f to 1"
I0805 16:20:47.420034 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-75b54ff5f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-75b54ff5f-g74d9"
core.sh:1427: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m:
core.sh:1428: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m:
error: unable to find container named redis
deployment.apps/nginx-deployment-resources resource requirements updated
I0805 16:20:47.698618 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-576b8799f to 2 from 3"
I0805 16:20:47.726005 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-576b8799f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-576b8799f-8f5b6"
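kubectl set resources, used above, patches per-container requests and limits in the pod template; naming a container that is not in the template fails without modifying the object. A sketch (values illustrative, deployment name from the log):

# Update limits on all containers of the deployment:
kubectl set resources deployment nginx-deployment-resources --limits=cpu=200m,memory=512Mi
# Target a single container with -c; an unknown name produces the logged error:
kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
#   error: unable to find container named redis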
I0805 16:20:47.727894 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-84bc47dc69 to 1 from 0"
I0805 16:20:47.731739 56975 event.go:294] "Event occurred" object="namespace-1659716439-11957/nginx-deployment-resources-84bc47dc69" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-84bc47dc69-s92mz"
core.sh:1433: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
... skipping 155 lines ...
    status: "True"
    type: Progressing
  observedGeneration: 4
  replicas: 4
  unavailableReplicas: 4
  updatedReplicas: 1
error: you must specify resources by --filename when --local is set. Example resource specifications include: '-f rsrc.yaml' '--filename=rsrc.json'
core.sh:1444: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
core.sh:1445: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m:
core.sh:1446: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m:
... skipping 46 lines ...
              pod-template-hash=69f9b68fc4
Annotations:    deployment.kubernetes.io/desired-replicas: 1
                deployment.kubernetes.io/max-replicas: 2
                deployment.kubernetes.io/revision: 1
Controlled By:  Deployment/test-nginx-apps
Replicas:       1 current / 1 desired
Pods Status:    0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=test-nginx-apps
           pod-template-hash=69f9b68fc4
  Containers:
   nginx:
    Image:  registry.k8s.io/nginx:test-cmd
... skipping 122 lines ...
I0805 16:20:53.539844 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-7c77c749d8" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-7c77c749d8-xfjb4"
apps.sh:311: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
    Image:  registry.k8s.io/nginx:test-cmd
deployment.apps/nginx rolled back (server dry run)
apps.sh:315: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx rolled back
W0805 16:20:54.909816 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:20:54.909851 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:319: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
error: unable to find specified revision 1000000 in history
apps.sh:322: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
deployment.apps/nginx rolled back
W0805 16:20:56.222454 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:20:56.222494 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:326: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx paused
error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/nginx' and try again
error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first)
deployment.apps/nginx resumed
deployment.apps/nginx rolled back
    deployment.kubernetes.io/revision-history: 1,3
error: desired revision (3) is different from the running revision (5)
deployment.apps/nginx restarted
I0805 16:20:57.044989 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-5ddbc6cdcd to 2 from 3"
I0805 16:20:57.049544 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-5ddbc6cdcd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-5ddbc6cdcd-pdv4p"
I0805 16:20:57.054437 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5b587cf885 to 1 from 0"
I0805 16:20:57.058020 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-5b587cf885" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5b587cf885-2vzcc"
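The two errors above are kubectl rollout guard rails: a paused deployment can still be edited, but it cannot be rolled back or restarted. A sketch of the sequence the test walks through (deployment name from the log):

kubectl rollout pause deployment/nginx
kubectl rollout undo deployment/nginx      # refused while paused
kubectl rollout restart deployment/nginx   # likewise refused while paused
kubectl rollout resume deployment/nginx
kubectl rollout undo deployment/nginx --to-revision=3
kubectl rollout restart deployment/nginx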
Successful
... skipping 80 lines ...
apps.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
I0805 16:20:59.488310 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-56d5c589b4 to 1"
I0805 16:20:59.492180 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-deployment-56d5c589b4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-56d5c589b4-sm54q"
apps.sh:373: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:374: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
error: unable to find container named "redis"
deployment.apps/nginx-deployment image updated
apps.sh:379: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
apps.sh:380: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
apps.sh:383: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:384: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
... skipping 58 lines ...
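kubectl set image resolves containers by name inside the pod template and rejects unknown names before touching the object. A sketch (images from the log; the redis name is the deliberately bad input):

kubectl set image deployment/nginx-deployment nginx=registry.k8s.io/nginx:1.7.9
kubectl set image deployment/nginx-deployment redis=registry.k8s.io/nginx:1.7.9
#   error: unable to find container named "redis"
# '*' targets every container in the template:
kubectl set image deployment/nginx-deployment '*=registry.k8s.io/nginx:test-cmd'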
Warning: key password transferred to PASSWORD
Warning: key username transferred to USERNAME
deployment.apps/nginx-deployment env updated
deployment.apps/nginx-deployment env updated
I0805 16:21:02.764064 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-76859c686c to 1"
Successful
message:error: standard input cannot be used for multiple arguments
has:standard input cannot be used for multiple arguments
I0805 16:21:02.817247 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-deployment-7d9b97d484" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-7d9b97d484-db526"
deployment.apps "nginx-deployment" deleted
E0805 16:21:02.866587 56975 replica_set.go:550] sync "namespace-1659716448-21311/nginx-deployment-6d9595bfb4" failed with replicasets.apps "nginx-deployment-6d9595bfb4" not found
configmap "test-set-env-config" deleted
I0805 16:21:02.967883 56975 event.go:294] "Event occurred" object="namespace-1659716448-21311/nginx-deployment-76859c686c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-76859c686c-xp44p"
secret "test-set-env-secret" deleted
+++ exit code: 0
Recording: run_rs_tests
Running command: run_rs_tests
+++ Running case: test-cmd.run_rs_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_rs_tests
+++ [0805 16:21:03] Creating namespace namespace-1659716463-32700
E0805 16:21:03.115808 56975 replica_set.go:550] sync "namespace-1659716448-21311/nginx-deployment-7c5d647975" failed with replicasets.apps "nginx-deployment-7c5d647975" not found
namespace/namespace-1659716463-32700 created
Context "test" modified.
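The key password transferred to PASSWORD warnings above come from kubectl set env --from, which rewrites config keys into valid environment-variable names. A sketch with the objects named in the log:

# Import every key of a configmap or secret as container env vars:
kubectl set env deployment/nginx-deployment --from=configmap/test-set-env-config
kubectl set env deployment/nginx-deployment --from=secret/test-set-env-secret
# Keys such as "password" become PASSWORD, one warning per renamed key.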
+++ [0805 16:21:03] Testing kubectl(v1:replicasets)
apps.sh:553: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
E0805 16:21:03.265827 56975 replica_set.go:550] sync "namespace-1659716448-21311/nginx-deployment-7d9b97d484" failed with replicasets.apps "nginx-deployment-7d9b97d484" not found
E0805 16:21:03.315844 56975 replica_set.go:550] sync "namespace-1659716448-21311/nginx-deployment-76859c686c" failed with replicasets.apps "nginx-deployment-76859c686c" not found
replicaset.apps/frontend created
I0805 16:21:03.417358 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-pnz5c"
+++ [0805 16:21:03] Deleting rs
I0805 16:21:03.420906 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-4nxt7"
replicaset.apps "frontend" deleted
I0805 16:21:03.516608 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-xgjr8"
apps.sh:559: Successful get pods -l "tier=frontend" {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:563: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
E0805 16:21:03.716049 56975 replica_set.go:550] sync "namespace-1659716463-32700/frontend" failed with replicasets.apps "frontend" not found
replicaset.apps/frontend created
I0805 16:21:03.792023 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-6rfs4"
Waiting for Get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}} : expected: php-redis:php-redis:php-redis:, got: php-redis:
I0805 16:21:03.867173 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-plbl9"
I0805 16:21:03.918562 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-pxfsw"
apps.sh:567: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
+++ [0805 16:21:03] Deleting rs
replicaset.apps "frontend" deleted
E0805 16:21:04.115475 56975 replica_set.go:550] sync "namespace-1659716463-32700/frontend" failed with replicasets.apps "frontend" not found
apps.sh:571: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:573: Successful get pods -l "tier=frontend" {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
pod "frontend-6rfs4" deleted
pod "frontend-plbl9" deleted
pod "frontend-pxfsw" deleted
apps.sh:576: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:580: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
replicaset.apps/frontend created
I0805 16:21:04.624610 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-z5g88"
I0805 16:21:04.628643 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-gxbmb"
I0805 16:21:04.628754 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-86bgq"
W0805 16:21:04.671572 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:21:04.671616 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:584: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}: frontend:
matched Name:
matched Pod Template:
matched Labels:
matched Selector:
matched Replicas:
... skipping 4 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 18 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 12 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 25 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 11 lines ...
Namespace:    namespace-1659716463-32700
Selector:     app=guestbook,tier=frontend
Labels:       app=guestbook
              tier=frontend
Annotations:  <none>
Replicas:     3 current / 3 desired
Pods Status:  0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=guestbook
           tier=frontend
  Containers:
   php-redis:
    Image:      gcr.io/google_samples/gb-frontend:v3
... skipping 168 lines ...
apps.sh:639: Successful get deploy scale-2 {{.spec.replicas}}: 3
apps.sh:640: Successful get deploy scale-3 {{.spec.replicas}}: 3
replicaset.apps "frontend" deleted
deployment.apps "scale-1" deleted
deployment.apps "scale-2" deleted
deployment.apps "scale-3" deleted
E0805 16:21:07.816352 56975 replica_set.go:550] sync "namespace-1659716463-32700/scale-1-6c6f76d7d5" failed with Operation cannot be fulfilled on replicasets.apps "scale-1-6c6f76d7d5": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1659716463-32700/scale-1-6c6f76d7d5, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 7c793487-5e0d-48ef-8864-a54a120fd868, UID in object meta:
E0805 16:21:07.865809 56975 replica_set.go:550] sync "namespace-1659716463-32700/scale-3-6c6f76d7d5" failed with Operation cannot be fulfilled on replicasets.apps "scale-3-6c6f76d7d5": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1659716463-32700/scale-3-6c6f76d7d5, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 387a8864-5c0c-4b11-8bee-16d002786d40, UID in object meta:
replicaset.apps/frontend created
I0805 16:21:07.948245 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-zj466"
I0805 16:21:08.017837 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-wl4d6"
apps.sh:648: Successful get rs frontend {{.spec.replicas}}: 3
I0805 16:21:08.067092 56975 event.go:294] "Event occurred" object="namespace-1659716463-32700/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-8j6lq"
I0805 16:21:08.086437 53408 alloc.go:327] "allocated clusterIPs" service="namespace-1659716463-32700/frontend" clusterIPs=map[IPv4:10.0.0.202]
service/frontend exposed
apps.sh:652: Successful get service frontend {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
service "frontend" deleted
apps.sh:658: Successful get rs frontend {{.metadata.generation}}: 1
replicaset.apps/frontend image updated
E0805 16:21:08.417686 56975 replica_set.go:550] sync "namespace-1659716463-32700/frontend" failed with Operation cannot be fulfilled on replicasets.apps "frontend": the object has been modified; please apply your changes to the latest version and try again
apps.sh:660: Successful get rs frontend {{.metadata.generation}}: 2
replicaset.apps/frontend env updated
apps.sh:662: Successful get rs frontend {{.metadata.generation}}: 3
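The generation assertions above (1, then 2, then 3) rely on the API server incrementing .metadata.generation on every spec mutation, which is exactly what set image and set env trigger. A sketch:

kubectl get rs frontend -o go-template='{{.metadata.generation}}'   # e.g. 1
kubectl set image rs/frontend '*=registry.k8s.io/pause:latest'
kubectl get rs frontend -o go-template='{{.metadata.generation}}'   # now one higher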
replicaset.apps/frontend resource requirements updated (dry run)
replicaset.apps/frontend resource requirements updated (server dry run)
apps.sh:665: Successful get rs frontend {{.metadata.generation}}: 3
... skipping 35 lines ...
horizontalpodautoscaler.autoscaling/frontend autoscaled
apps.sh:716: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80
Successful
message:kubectl-autoscale
has:kubectl-autoscale
horizontalpodautoscaler.autoscaling "frontend" deleted
error: required flag(s) "max" not set
replicaset.apps "frontend" deleted
+++ exit code: 0
Recording: run_stateful_set_tests
Running command: run_stateful_set_tests
+++ Running case: test-cmd.run_stateful_set_tests
... skipping 74 lines ...
apps.sh:475: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/pause:2.0:
apps.sh:476: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 2
statefulset.apps/nginx rolled back
apps.sh:479: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.7:
apps.sh:480: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
Successful
message:error: unable to find specified revision 1000000 in history
has:unable to find specified revision
apps.sh:484: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.7:
apps.sh:485: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
statefulset.apps/nginx rolled back
apps.sh:488: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.8:
apps.sh:489: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/pause:2.0:
... skipping 63 lines ...
Name:         mock
Namespace:    namespace-1659716475-2178
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:        registry.k8s.io/pause:3.8
    Port:         9949/TCP
... skipping 61 lines ...
Name:         mock
Namespace:    namespace-1659716475-2178
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:        registry.k8s.io/pause:3.8
    Port:         9949/TCP
... skipping 61 lines ...
Name:         mock
Namespace:    namespace-1659716475-2178
Selector:     app=mock
Labels:       app=mock
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:        registry.k8s.io/pause:3.8
    Port:         9949/TCP
... skipping 42 lines ...
Namespace:    namespace-1659716475-2178
Selector:     app=mock
Labels:       app=mock
              status=replaced
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock
  Containers:
   mock-container:
    Image:        registry.k8s.io/pause:3.8
    Port:         9949/TCP
... skipping 11 lines ...
Namespace:    namespace-1659716475-2178
Selector:     app=mock2
Labels:       app=mock2
              status=replaced
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=mock2
  Containers:
   mock-container:
    Image:        registry.k8s.io/pause:3.8
    Port:         9949/TCP
... skipping 115 lines ...
I0805 16:21:25.788020 56975 horizontal.go:360] Horizontal Pod Autoscaler frontend has been deleted in namespace-1659716463-32700
namespace/namespace-1659716485-31335 created
Context "test" modified.
+++ [0805 16:21:25] Testing persistent volumes
storage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
persistentvolume/pv0001 created
E0805 16:21:26.133922 56975 pv_protection_controller.go:114] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
storage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
persistentvolume "pv0001" deleted
persistentvolume/pv0002 created
storage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002:
persistentvolume "pv0002" deleted
persistentvolume/pv0003 created
E0805 16:21:26.945317 56975 pv_protection_controller.go:114] PV pv0003 failed with : Operation cannot be fulfilled on persistentvolumes "pv0003": the object has been modified; please apply your changes to the latest version and try again
storage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003:
query for persistentvolumes had limit param
query for events had limit param
query for persistentvolumes had user-specified limit param
Successful describe persistentvolumes verbose logs:
I0805 16:21:27.059751 86836 loader.go:374] Config loaded from file: /tmp/tmp.LiXfhpA4H4/.kube/config
I0805 16:21:27.064680 86836 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 4 milliseconds
I0805 16:21:27.071811 86836 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes?limit=500 200 OK in 1 milliseconds
I0805 16:21:27.073998 86836 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes/pv0003 200 OK in 1 milliseconds
I0805 16:21:27.085520 86836 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.kind%3DPersistentVolume%2CinvolvedObject.uid%3Dcfa3e8af-5420-4f4f-ab23-ef5f7bfe5a7e%2CinvolvedObject.name%3Dpv0003%2CinvolvedObject.namespace%3D&limit=500 200 OK in 10 milliseconds
persistentvolume "pv0003" deleted
storage.sh:44: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
persistentvolume/pv0001 created
E0805 16:21:27.553618 56975 pv_protection_controller.go:114] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
storage.sh:47: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
persistentvolume "pv0001" deleted
has:Warning: deleting cluster-scoped resources
Successful
... skipping 61 lines ...
Successful describe storageclasses verbose logs:
I0805 16:21:30.072669 87318 loader.go:374] Config loaded from file: /tmp/tmp.LiXfhpA4H4/.kube/config
I0805 16:21:30.079855 87318 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 6 milliseconds
I0805 16:21:30.088321 87318 round_trippers.go:553] GET https://127.0.0.1:6443/apis/storage.k8s.io/v1/storageclasses?limit=500 200 OK in 2 milliseconds
I0805 16:21:30.091352 87318 round_trippers.go:553] GET https://127.0.0.1:6443/apis/storage.k8s.io/v1/storageclasses/storage-class-name 200 OK in 1 milliseconds
I0805 16:21:30.100771 87318 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.namespace%3D%2CinvolvedObject.kind%3DStorageClass%2CinvolvedObject.uid%3Da455a864-cb94-4721-ad2a-7d0d59d061c4%2CinvolvedObject.name%3Dstorage-class-name&limit=500 200 OK in 8 milliseconds
W0805 16:21:30.187914 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:21:30.187964 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
storageclass.storage.k8s.io "storage-class-name" deleted
storage.sh:118: Successful get storageclass {{range.items}}{{.metadata.name}}:{{end}}:
+++ exit code: 0
Recording: run_nodes_tests
Running command: run_nodes_tests
... skipping 15 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 35 lines ...
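The round_trippers.go:553 lines appear because the describe tests raise kubectl's client-side verbosity, which logs every HTTP request with method, URL, status, and latency. A sketch:

# -v=6 shows each API call a command makes, e.g. that list requests carry ?limit=500:
kubectl describe storageclass storage-class-name -v=6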
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 31 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 42 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 34 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 30 lines ...
Labels:             <none>
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    save-managers: true
CreationTimestamp:  Fri, 05 Aug 2022 16:16:17 +0000
Taints:             node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Lease:              Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason                   Message
  ----             ------    -----------------                 ------------------                ------                   -------
  Ready            Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  MemoryPressure   Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
  DiskPressure     Unknown   Fri, 05 Aug 2022 16:16:17 +0000   Fri, 05 Aug 2022 16:17:17 +0000   NodeStatusNeverUpdated   Kubelet never posted node status.
... skipping 172 lines ...
yes
has:the server doesn't have a resource type
Successful
message:yes
has:yes
Successful
message:error: --subresource can not be used with NonResourceURL
has:subresource can not be used with NonResourceURL
Successful
Successful
message:yes
0
has:0
... skipping 62 lines ...
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]}
legacy-script.sh:870: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB:
legacy-script.sh:871: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R:
legacy-script.sh:872: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB:
legacy-script.sh:873: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR:
Successful
message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole
has:only rbac.authorization.k8s.io/v1 is supported
rolebinding.rbac.authorization.k8s.io "testing-RB" deleted
role.rbac.authorization.k8s.io "testing-R" deleted
Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "testing-CR" deleted
clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted
... skipping 24 lines ...
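The bare yes / 0 messages above are kubectl auth can-i answers. A sketch of the forms being exercised (resource names illustrative):

kubectl auth can-i get pods                      # prints yes or no
kubectl auth can-i get /logs/                    # NonResourceURL form
kubectl auth can-i get pods --subresource=log
# --subresource only combines with resource queries; with a NonResourceURL it fails:
#   error: --subresource can not be used with NonResourceURL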
discovery.sh:91: Successful get all -l'app=cassandra' {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra:
pod "cassandra-mpm7w" deleted
I0805 16:21:38.512415 56975 event.go:294] "Event occurred" object="namespace-1659716497-582/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-9mqtd"
pod "cassandra-zq28n" deleted
I0805 16:21:38.519906 56975 event.go:294] "Event occurred" object="namespace-1659716497-582/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-wc5hd"
replicationcontroller "cassandra" deleted
E0805 16:21:38.524781 56975 replica_set.go:550] sync "namespace-1659716497-582/cassandra" failed with replicationcontrollers "cassandra" not found
service "cassandra" deleted
+++ exit code: 0
Recording: run_kubectl_explain_tests
Running command: run_kubectl_explain_tests
+++ Running case: test-cmd.run_kubectl_explain_tests
... skipping 104 lines ...
   status       <Object>
     Current status of a cron job. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+++ exit code: 0
W0805 16:21:39.275626 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:21:39.275670 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Recording: run_crd_deletion_recreation_tests
Running command: run_crd_deletion_recreation_tests
+++ Running case: test-cmd.run_crd_deletion_recreation_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_crd_deletion_recreation_tests
+++ [0805 16:21:39] Creating namespace namespace-1659716499-29439
namespace/namespace-1659716499-29439 created
Context "test" modified.
+++ [0805 16:21:39] Testing resource creation, deletion, and re-creation
W0805 16:21:39.465158 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:21:39.465216 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:customresourcedefinition.apiextensions.k8s.io/examples.test.com created
has:created
Successful
message:example.test.com/test created
has:created
... skipping 339 lines ...
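The status <Object> fragment above is kubectl explain output, which walks the published OpenAPI schema field by field. A sketch:

kubectl explain cronjob.status
kubectl explain cronjob.spec.schedule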
namespace-1659716497-582     default   0     12s
namespace-1659716499-29439   default   0     10s
some-other-random            default   0     12s
has:all-ns-test-2
namespace "all-ns-test-1" deleted
namespace "all-ns-test-2" deleted
W0805 16:21:57.522632 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:21:57.522673 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0805 16:21:59.645484 56975 namespace_controller.go:185] Namespace has been deleted all-ns-test-1
get.sh:400: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
get.sh:404: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
get.sh:408: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1:
... skipping 17 lines ...
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1659716499-29439 namespace.
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1659716499-29439 namespace.
error: 1 warning received
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1659716499-29439 namespace.
error: 1 warning received
has:error: 1 warning received
customresourcedefinition.apiextensions.k8s.io "deprecated.example.com" deleted
+++ exit code: 0
Recording: run_template_output_tests
Running command: run_template_output_tests
+++ Running case: test-cmd.run_template_output_tests
... skipping 545 lines ...
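The error: 1 warning received lines show --warnings-as-errors converting the CRD deprecation warning into a non-zero exit. A sketch (the resource name is a guess at the test CRD's plural, purely illustrative):

kubectl get deprecatedkinds --warnings-as-errors
#   Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
#   error: 1 warning received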
node-management.sh:125: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node/127.0.0.1 cordoned (dry run)
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1, namespace-1659716527-14830/test-pod-2
evicting pod namespace-1659716527-14830/test-pod-1 (dry run)
evicting pod namespace-1659716527-14830/test-pod-2 (dry run)
node/127.0.0.1 drained (dry run)
W0805 16:22:09.739662 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:22:09.739700 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
node/127.0.0.1 cordoned (server dry run)
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1, namespace-1659716527-14830/test-pod-2
evicting pod namespace-1659716527-14830/test-pod-2 (server dry run)
evicting pod namespace-1659716527-14830/test-pod-1 (server dry run)
node/127.0.0.1 drained (server dry run)
node-management.sh:129: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1:
node-management.sh:130: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:134: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
W0805 16:22:09.987952 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:22:09.987994 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
node-management.sh:136: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
node/127.0.0.1 cordoned (dry run)
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1
evicting pod namespace-1659716527-14830/test-pod-1 (dry run)
node/127.0.0.1 drained (dry run)
node/127.0.0.1 cordoned (server dry run)
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1
evicting pod namespace-1659716527-14830/test-pod-1 (server dry run)
node/127.0.0.1 drained (server dry run)
node-management.sh:140: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1
W0805 16:22:25.764893 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:22:25.764931 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:node/127.0.0.1 cordoned
evicting pod namespace-1659716527-14830/test-pod-1
pod "test-pod-1" has DeletionTimestamp older than 1 seconds, skipping
node/127.0.0.1 drained
has:evicting pod .*/test-pod-1
... skipping 14 lines ...
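Drain behavior above, sketched: --dry-run=client only prints the plan, --dry-run=server runs cordon and eviction through admission without persisting anything, and pods with no owning controller are only evicted because of --force (hence the warning):

kubectl drain 127.0.0.1 --dry-run=client --force
kubectl drain 127.0.0.1 --dry-run=server --force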
message:node/127.0.0.1 already uncordoned (server dry run)
has:already uncordoned
node-management.sh:161: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node/127.0.0.1 labeled
node-management.sh:166: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
Successful
message:error: cannot specify both a node name and a --selector option
See 'kubectl drain -h' for help and examples
has:cannot specify both a node name
node-management.sh:172: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
node-management.sh:174: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:176: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Successful
... skipping 78 lines ...
Warning: deleting Pods that declare no controller: namespace-1659716527-14830/test-pod-1, namespace-1659716527-14830/test-pod-2
evicting pod namespace-1659716527-14830/test-pod-1 (dry run)
evicting pod namespace-1659716527-14830/test-pod-2 (dry run)
node/127.0.0.1 drained (dry run)
has:/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK
Successful
message:error: USAGE: cordon NODE [flags]
See 'kubectl cordon -h' for help and examples
has:error\: USAGE\: cordon NODE
node/127.0.0.1 already uncordoned
Successful
message:error: You must provide one or more resources by argument or filename.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
   '<resource> <name>'
   '<resource>'
has:must provide one or more resources
... skipping 18 lines ...
+++ [0805 16:22:44] Testing kubectl plugins
Successful
message:The following compatible plugins are available:

test/fixtures/pkg/kubectl/plugins/version/kubectl-version
  - warning: kubectl-version overwrites existing command: "kubectl version"

error: one plugin warning was found
has:kubectl-version overwrites existing command: "kubectl version"
Successful
message:The following compatible plugins are available:

test/fixtures/pkg/kubectl/plugins/kubectl-foo
test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo
  - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo

error: one plugin warning was found
has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin
Successful
message:The following compatible plugins are available:

test/fixtures/pkg/kubectl/plugins/kubectl-foo
has:plugins are available
Successful
message:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping...
error: unable to find any kubectl plugins in your PATH
has:unable to find any kubectl plugins in your PATH
Successful
message:I am plugin foo
has:plugin foo
Successful
message:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1
... skipping 13 lines ...
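The plugin checks above rely on kubectl's PATH-based plugin discovery: any executable named kubectl-<name> on PATH becomes the subcommand `kubectl <name>`, and `kubectl plugin list` reports the conflicts seen in the messages. A sketch of the mechanism, reusing the fixture paths from the log:

  # Put the fixture plugins on PATH, then ask kubectl what it can see.
  export PATH="test/fixtures/pkg/kubectl/plugins:$PATH"
  kubectl plugin list   # warns when a plugin overwrites a builtin or shadows another plugin
  kubectl foo           # dispatches to the first kubectl-foo found on PATH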
+++ Running case: test-cmd.run_impersonation_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_impersonation_tests
+++ [0805 16:22:45] Testing impersonation
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
certificatesigningrequest.certificates.k8s.io/foo created
authorization.sh:60: Successful get csr/foo {{.spec.username}}: user1
authorization.sh:61: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated
certificatesigningrequest.certificates.k8s.io "foo" deleted
certificatesigningrequest.certificates.k8s.io/foo created
... skipping 19 lines ...
I0805 16:22:46.484231 56975 event.go:294] "Event occurred" object="namespace-1659716566-4927/test-1" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-1-659d96d845 to 1"
I0805 16:22:46.489308 56975 event.go:294] "Event occurred" object="namespace-1659716566-4927/test-1-659d96d845" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-659d96d845-qx99f"
deployment.apps/test-2 created
I0805 16:22:46.552112 56975 event.go:294] "Event occurred" object="namespace-1659716566-4927/test-2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-f995bb75d to 1"
I0805 16:22:46.556123 56975 event.go:294] "Event occurred" object="namespace-1659716566-4927/test-2-f995bb75d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-f995bb75d-xvcsm"
wait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2,
W0805 16:22:46.719479 56975 reflector.go:424] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0805 16:22:46.719522 56975 reflector.go:140] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:90: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps "test-1" deleted
deployment.apps "test-2" deleted
Successful
message:deployment.apps/test-1 condition met
deployment.apps/test-2 condition met
has:test-1 condition met
... skipping 82 lines ...
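The two impersonation failures above come from requesting UID or group impersonation without naming a user: --as-uid and --as-group are only valid alongside --as. The "condition met" messages further down are the `kubectl wait` form the test asserts on. A sketch under those assumptions (the exact test invocations are elided in this log):

  # Rejected: uid/groups requested without impersonating a user.
  kubectl get pods --as-uid=1234

  # Accepted: impersonate user1 with an explicit uid and group.
  kubectl get pods --as=user1 --as-uid=1234 --as-group=system:authenticated

  # Blocks until both deployments report Available=True, then prints
  # "deployment.apps/<name> condition met" for each.
  kubectl wait --for=condition=Available deployment/test-1 deployment/test-2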
+++ [0805 16:23:03] Running tests without code coverage
{"Time":"2022-08-05T16:26:33.158988844Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apimachinery","Output":"ok \tk8s.io/kubernetes/test/integration/apimachinery\t68.706s\n"}
{"Time":"2022-08-05T16:27:49.655974302Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/certreload","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/certreload\t71.202s\n"}
{"Time":"2022-08-05T16:27:52.167648166Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/admissionwebhook","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/admissionwebhook\t134.046s\n"}
{"Time":"2022-08-05T16:29:00.786157099Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/openapi","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/openapi\t62.059s\n"}
{"Time":"2022-08-05T16:29:11.347602686Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/podlogs","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/podlogs\t5.086s\n"}
{"Time":"2022-08-05T16:29:12.98334819Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestWatchCacheUpdatedByEtcd","Output":"WARNING: 2022/08/05 16:29:12 [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:44015 127.0.0.1:44015 \u003cnil\u003e 0 \u003cnil\u003e}. Err: connection error: desc = \"transport: Error while dialing dial tcp 127.0.0.1:44015: connect: connection refused\". Reconnecting...\n"}
{"Time":"2022-08-05T16:29:12.990150851Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Test":"TestWatchCacheUpdatedByEtcd","Output":"WARNING: 2022/08/05 16:29:12 [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:39957 127.0.0.1:39957 \u003cnil\u003e 0 \u003cnil\u003e}. Err: connection error: desc = \"transport: Error while dialing dial tcp 127.0.0.1:39957: connect: connection refused\". Reconnecting...\n"}
{"Time":"2022-08-05T16:29:13.119242405Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver\t210.748s\n"}
{"Time":"2022-08-05T16:29:24.961350709Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/tracing","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/tracing\t8.017s\n"}
{"Time":"2022-08-05T16:30:13.726585329Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/flowcontrol","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/flowcontrol\t138.182s\n"}
{"Time":"2022-08-05T16:30:25.808259713Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/apiserver/apply","Output":"ok \tk8s.io/kubernetes/test/integration/apiserver/apply\t287.271s\n"}
{"Time":"2022-08-05T16:30:37.212613065Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/configmap","Output":"ok \tk8s.io/kubernetes/test/integration/configmap\t5.620s\n"}
{"Time":"2022-08-05T16:30:56.28214199Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/certificates","Output":"ok \tk8s.io/kubernetes/test/integration/certificates\t83.841s\n"}
... skipping 159 lines ...
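Everything from here down is a `go test -json` event stream: each line is one JSON object, and a passing package surfaces as an "ok <package> <seconds>" line inside an "Action":"output" event. A quick way to pull just the package results back out of a saved log; the build-log.txt filename and the use of jq are assumptions, not part of the job:

  # Truncated lines (like the one cut off by the abort below) would need
  # filtering first, since jq aborts on invalid JSON input.
  grep '"Action":"output"' build-log.txt \
    | jq -r 'select(.Output | startswith("ok")) | .Output'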
{"Time":"2022-08-05T16:37:58.034548932Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/garbagecollector","Test":"TestCascadingDeletion","Output":"toragecapacities storage.k8s.io/v1, Resource=storageclasses storage.k8s.io/v1, Resource=volumeattachments], removed: []\n"} {"Time":"2022-08-05T16:38:03.870854112Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/kubelet","Output":"ok \tk8s.io/kubernetes/test/integration/kubelet\t7.872s\n"} {"Time":"2022-08-05T16:38:04.778347058Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/logs/benchmark","Test":"TestData/data/values.log/structured","Output":"\"Example\" pod=\"system/kube-scheduler\" pv=\"volume\" someString=\"hello world\" someValue=1\n"} {"Time":"2022-08-05T16:38:04.781506293Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/logs/benchmark","Output":"ok \tk8s.io/kubernetes/test/integration/logs/benchmark\t0.013s\n"} {"Time":"2022-08-05T16:38:04.979528071Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/logs/benchmark/contextual-logging","Output":"ok \tk8s.io/kubernetes/test/integration/logs/benchmark/contextual-logging\t0.002s [no tests to run]\n"} {"Time":"2022-08-05T16:38:05.020053857Z","Action":"output","Package":"k8s.io/kubernetes/test/integration/garbagecollector","Test":"TestCreateWithNonExistentOwner","Output":"bs batch/v1, Resource=jobs certificates.k8s.io/v1, Resource=certificatesigningrequests coordination.k8s.io/v1, Resource=leases discovery.k8s.io/v1, Resource=endpointslices events.k8s.io/v1, Resource=events flowcontrol.apiserver.k8s.io/v1beta2, Resource=flowschemas flowcontrol.apiserver.k8s.io/v1beta2, Resource=prioritylevelconfigurations internal.apiserver.k8s.io/v1alpha1, Resource=storageversions networking.k8s.io/v1, Resource=ingressclasses networking.k8s.io/v1, Resource=ingresses networking.k8s.io/v1, Resource=networkpolicies node.k8s.io/v1, Resource=runtimeclasses policy/v1, Resource=poddisruptionbudgets policy/v1beta1, Resource=podsecuritypolicies rbac.authorization.k8s.io/v1, Resource=clusterrolebindings rbac.authorization.k8s.io/v1, Resource=clusterroles rbac.authorization.k8s.io/v1, Resource=rolebindings rbac.authorization.k8s.io/v1, Resource=roles scheduling.k8s.io/v1, Resource=priorityclasses storage.k8s.io/v1, Resource=csidrivers storage.k8s.io/v1, Resource=csinodes storage.k8s.io/v1, Resource=csis"} {"Time":"2022-08-05T16:38:05.020068513Z","Action":"output{"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:169","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Entrypoint received interrupt: terminated","severity":"error","time":"2022-08-05T16:38:25Z"} ++ early_exit_handler ++ '[' -n 173 ']' ++ kill -TERM 173 ++ cleanup_dind ++ [[ true == \t\r\u\e ]] ++ echo 'Cleaning up after docker' ... skipping 5 lines ...