PR | kannon92: Using parsers in applyDefaultImageTag and adding error test cases.
Result | ABORTED |
Tests | 0 failed / 140 succeeded |
Started |
Elapsed | 45m7s |
Revision | 0c9996821735632c0e44badb5d1cb54f0939d472 |
Refs | 116231
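
The change under test replaces ad-hoc string handling in kubectl's applyDefaultImageTag with an image-reference parser, which is what makes malformed names such as "InvalidImageName" fail fast with the "invalid reference format" error visible in the log below. The following Go sketch illustrates the parser-based pattern only; it assumes the github.com/distribution/reference library (older Kubernetes trees vendor it as github.com/docker/distribution/reference) and is not the PR's actual implementation.

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

// applyDefaultImageTag parses the image string instead of splitting on ":".
// ParseNormalizedNamed also normalizes bare names ("nginx" becomes
// "docker.io/library/nginx") and rejects malformed references outright.
func applyDefaultImageTag(image string) (string, error) {
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return "", fmt.Errorf("invalid image name %q: %v", image, err)
	}
	// TagNameOnly appends the default ":latest" only when the reference
	// carries neither a tag nor a digest.
	return reference.TagNameOnly(named).String(), nil
}

func main() {
	for _, img := range []string{"nginx", "nginx:1.25", "InvalidImageName"} {
		out, err := applyDefaultImageTag(img)
		fmt.Printf("%-20s -> %q err=%v\n", img, out, err)
	}
}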
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/shell_not_expected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdCompletion/unsupported_shell_type
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/accept_a_valid_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_negative_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_non-string_port
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitAPIPort/fail_on_too_large_port_number
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_old_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta1_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can't_load_v1beta2_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_current_component_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/can_load_v1beta3_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/don't_allow_mixed_arguments_v1beta3
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitConfig/fail_on_non_existing_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/feature_gate_PublicKeysECDSA=true
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitFeatureGates/no_feature_gates_passed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/invalid_semantic_version_string_is_detected
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitKubernetesVersion/valid_version_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_non-lowercase
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/invalid_token_size
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdInitToken/valid_token_is_accepted
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinArgsMixed/discovery-token_and_config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_and_discovery-file_can't_both_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinBadArgs/discovery-token_or_discovery-file_must_be_set
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinConfig/config_path
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/invalid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryFile/valid_discovery_file
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinDiscoveryToken/valid_discovery_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinNodeName/valid_node_name
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinTLSBootstrapToken/valid_bootstrap_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdJoinToken/valid_token_url
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/invalid_token
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenDelete/no_token_provided
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerate
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdTokenGenerateTypoError
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/default_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/invalid_output_option
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersion/short_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/json_output
k8s.io/kubernetes/cmd/kubeadm/test/cmd TestCmdVersionOutputJsonOrYaml/yaml_output
test-cmd run_RESTMapper_evaluation_tests
test-cmd run_assert_categories_tests
test-cmd run_assert_short_name_tests
test-cmd run_assert_singular_name_tests
test-cmd run_authorization_tests
test-cmd run_certificates_tests
test-cmd run_client_config_tests
test-cmd run_cluster_management_tests
test-cmd run_clusterroles_tests
test-cmd run_configmap_tests
test-cmd run_convert_tests
test-cmd run_crd_deletion_recreation_tests
test-cmd run_crd_tests
test-cmd run_create_job_tests
test-cmd run_create_secret_tests
test-cmd run_daemonset_history_tests
test-cmd run_daemonset_tests
test-cmd run_deployment_tests
test-cmd run_deprecated_api_tests
test-cmd run_exec_credentials_interactive_tests
test-cmd run_exec_credentials_tests
test-cmd run_impersonation_tests
test-cmd run_job_tests
test-cmd run_kubectl_all_namespace_tests
test-cmd run_kubectl_apply_deployments_tests
test-cmd run_kubectl_apply_tests
test-cmd run_kubectl_config_set_cluster_tests
test-cmd run_kubectl_config_set_credentials_tests
test-cmd run_kubectl_config_set_tests
test-cmd run_kubectl_create_error_tests
test-cmd run_kubectl_create_filter_tests
test-cmd run_kubectl_create_kustomization_directory_tests
test-cmd run_kubectl_create_validate_tests
test-cmd run_kubectl_debug_baseline_node_tests
test-cmd run_kubectl_debug_baseline_tests
test-cmd run_kubectl_debug_general_node_tests
test-cmd run_kubectl_debug_general_tests
test-cmd run_kubectl_debug_node_tests
test-cmd run_kubectl_debug_pod_tests
test-cmd run_kubectl_delete_allnamespaces_tests
test-cmd run_kubectl_diff_same_names
test-cmd run_kubectl_diff_tests
test-cmd run_kubectl_events_tests
test-cmd run_kubectl_exec_pod_tests
test-cmd run_kubectl_exec_resource_name_tests
test-cmd run_kubectl_explain_tests
test-cmd run_kubectl_get_tests
test-cmd run_kubectl_help_tests
test-cmd run_kubectl_local_proxy_tests
test-cmd run_kubectl_request_timeout_tests
test-cmd run_kubectl_results_tests
test-cmd run_kubectl_run_tests
test-cmd run_kubectl_server_side_apply_tests
test-cmd run_kubectl_sort_by_tests
test-cmd run_kubectl_version_tests
test-cmd run_lists_tests
test-cmd run_multi_resources_tests
test-cmd run_namespace_tests
test-cmd run_nodes_tests
test-cmd run_persistent_volume_claims_tests
test-cmd run_persistent_volumes_tests
test-cmd run_plugins_tests
test-cmd run_pod_templates_tests
test-cmd run_pod_tests
test-cmd run_rc_tests
test-cmd run_recursive_resources_tests
test-cmd run_resource_aliasing_tests
test-cmd run_retrieve_multiple_tests
test-cmd run_role_tests
test-cmd run_rs_tests
test-cmd run_save_config_tests
test-cmd run_secrets_test
test-cmd run_service_accounts_tests
test-cmd run_service_tests
test-cmd run_stateful_set_tests
test-cmd run_statefulset_history_tests
test-cmd run_storage_class_tests
test-cmd run_swagger_tests
test-cmd run_template_output_tests
test-cmd run_wait_tests
... skipping 53 lines ...
Recording: record_command_canary
Running command: record_command_canary

+++ Running case: test-cmd.record_command_canary
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: record_command_canary
/home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh: line 165: bogus-expected-to-fail: command not found
!!! [0317 19:17:52] Call tree:
!!! [0317 19:17:52] 1: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:47 record_command_canary(...)
!!! [0317 19:17:52] 2: /home/prow/go/src/k8s.io/kubernetes/test/cmd/../../third_party/forked/shell2junit/sh2ju.sh:112 eVal(...)
!!! [0317 19:17:52] 3: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:141 juLog(...)
!!! [0317 19:17:52] 4: /home/prow/go/src/k8s.io/kubernetes/test/cmd/legacy-script.sh:169 record_command(...)
!!! [0317 19:17:52] 5: hack/make-rules/test-cmd.sh:35 source(...)
+++ exit code: 1
+++ error: 1
+++ [0317 19:17:52] Running kubeadm tests
+++ [0317 19:17:52] WARNING: linux/arm will no longer be built/shipped by default, please build it explicitly if needed.
+++ [0317 19:17:52] support for linux/arm will be removed in a subsequent release.
go version go1.20.2 linux/amd64
+++ [0317 19:17:56] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kubeadm (static)
... skipping 263 lines ...
go version go1.20.2 linux/amd64
+++ [0317 19:21:37] Building go targets for linux/amd64
    k8s.io/kubernetes/cmd/kube-controller-manager (static)
+++ [0317 19:22:21] Generate kubeconfig for controller-manager
+++ [0317 19:22:21] Starting controller-manager
I0317 19:22:21.959838 23215 serving.go:348] Generated self-signed cert in-memory
W0317 19:22:22.391229 23215 authentication.go:426] failed to read in-cluster kubeconfig for delegated authentication: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0317 19:22:22.391272 23215 authentication.go:320] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0317 19:22:22.391283 23215 authentication.go:344] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0317 19:22:22.391296 23215 authorization.go:225] failed to read in-cluster kubeconfig for delegated authorization: open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0317 19:22:22.391310 23215 authorization.go:193] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0317 19:22:22.391827 23215 controllermanager.go:187] "Starting" version="v1.27.0-beta.0.16+cb052f91e92e8d"
I0317 19:22:22.391858 23215 controllermanager.go:189] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0317 19:22:22.394615 23215 secure_serving.go:210] Serving securely on [::]:10257
I0317 19:22:22.394833 23215 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0317 19:22:22.395003 23215 leaderelection.go:245] attempting to acquire leader lease kube-system/kube-controller-manager...
... skipping 24 lines ...
I0317 19:22:22.458409 23215 certificate_controller.go:112] Starting certificate controller "csrsigning-kube-apiserver-client"
I0317 19:22:22.458440 23215 shared_informer.go:311] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client
I0317 19:22:22.458478 23215 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key"
I0317 19:22:22.459808 23215 controllermanager.go:638] "Started controller" controller="csrsigning"
I0317 19:22:22.459920 23215 certificate_controller.go:112] Starting certificate controller "csrsigning-legacy-unknown"
I0317 19:22:22.459982 23215 shared_informer.go:311] Waiting for caches to sync for certificate-csrsigning-legacy-unknown
E0317 19:22:22.460075 23215 core.go:213] "Failed to start cloud node lifecycle controller" err="no cloud provider provided"
I0317 19:22:22.460101 23215 controllermanager.go:616] "Warning: skipping controller" controller="cloud-node-lifecycle"
I0317 19:22:22.460079 23215 dynamic_serving_content.go:132] "Starting controller" name="csr-controller::hack/testdata/ca/ca.crt::hack/testdata/ca/ca.key"
W0317 19:22:22.460300 23215 mutation_detector.go:53] Mutation detector is enabled, this will result in memory leakage.
I0317 19:22:22.460401 23215 controllermanager.go:638] "Started controller" controller="clusterrole-aggregation"
I0317 19:22:22.460424 23215 core.go:224] "Will not configure cloud provider routes for allocate-node-cidrs" CIDRs=false routes=true
I0317 19:22:22.460437 23215 controllermanager.go:616] "Warning: skipping controller" controller="route"
... skipping 117 lines ...
I0317 19:22:22.483868 23215 replica_set.go:201] "Starting controller" name="replicaset"
I0317 19:22:22.483890 23215 shared_informer.go:311] Waiting for caches to sync for ReplicaSet
I0317 19:22:22.483996 23215 controllermanager.go:638] "Started controller" controller="disruption"
I0317 19:22:22.484130 23215 disruption.go:423] Sending events to api server.
I0317 19:22:22.484175 23215 disruption.go:434] Starting disruption controller
I0317 19:22:22.484184 23215 shared_informer.go:311] Waiting for caches to sync for disruption
E0317 19:22:22.484701 23215 core.go:92] "Failed to start service controller" err="WARNING: no cloud provider provided, services of type LoadBalancer will fail"
I0317 19:22:22.484729 23215 controllermanager.go:616] "Warning: skipping controller" controller="service"
I0317 19:22:22.485075 23215 controllermanager.go:638] "Started controller" controller="daemonset"
I0317 19:22:22.485440 23215 daemon_controller.go:289] "Starting daemon sets controller"
I0317 19:22:22.485463 23215 shared_informer.go:311] Waiting for caches to sync for daemon sets
I0317 19:22:22.485538 23215 controllermanager.go:638] "Started controller" controller="statefulset"
I0317 19:22:22.485696 23215 stateful_set.go:161] "Starting stateful set controller"
... skipping 53 lines ...
I0317 19:22:22.856264 23215 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-serving
I0317 19:22:22.857527 23215 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-client
I0317 19:22:22.858759 23215 shared_informer.go:318] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0317 19:22:22.861064 23215 shared_informer.go:318] Caches are synced for certificate-csrsigning-legacy-unknown
I0317 19:22:22.874879 23215 shared_informer.go:318] Caches are synced for certificate-csrapproving
node/127.0.0.1 created
I0317 19:22:23.190178 23215 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"127.0.0.1\" does not exist"
+++ [0317 19:22:23] Checking kubectl version
I0317 19:22:23.212564 23215 shared_informer.go:318] Caches are synced for garbage collector
I0317 19:22:23.262523 23215 shared_informer.go:318] Caches are synced for garbage collector
I0317 19:22:23.262568 23215 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"27+", GitVersion:"v1.27.0-beta.0.16+cb052f91e92e8d", GitCommit:"cb052f91e92e8d51ede0eea29ef5e944607966b3", GitTreeState:"clean", BuildDate:"2023-03-17T19:07:07Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v5.0.1
Server Version: version.Info{Major:"1", Minor:"27+", GitVersion:"v1.27.0-beta.0.16+cb052f91e92e8d", GitCommit:"cb052f91e92e8d51ede0eea29ef5e944607966b3", GitTreeState:"clean", BuildDate:"2023-03-17T19:07:07Z", GoVersion:"go1.20.2", Compiler:"gc", Platform:"linux/amd64"}
The Service "kubernetes" is invalid: spec.clusterIPs: Invalid value: []string{"10.0.0.1"}: failed to allocate IP 10.0.0.1: provided IP is already allocated
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   47s
Recording: run_kubectl_version_tests
Running command: run_kubectl_version_tests

+++ Running case: test-cmd.run_kubectl_version_tests
... skipping 196 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_RESTMapper_evaluation_tests
+++ [0317 19:22:29] Creating namespace namespace-1679080949-16676
namespace/namespace-1679080949-16676 created
Context "test" modified.
+++ [0317 19:22:29] Testing RESTMapper
+++ [0317 19:22:29] "kubectl get unknownresourcetype" returns error as expected: error: the server doesn't have a resource type "unknownresourcetype"
+++ exit code: 0
NAME                SHORTNAMES   APIVERSION   NAMESPACED   KIND
bindings                         v1           true         Binding
componentstatuses   cs           v1           false        ComponentStatus
configmaps          cm           v1           true         ConfigMap
endpoints           ep           v1           true         Endpoints
... skipping 60 lines ...
namespace/namespace-1679080952-9017 created
Context "test" modified.
+++ [0317 19:22:32] Testing clusterroles
rbac.sh:29: Successful get clusterroles/cluster-admin {{.metadata.name}}: cluster-admin
rbac.sh:30: Successful get clusterrolebindings/cluster-admin {{.metadata.name}}: cluster-admin
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created (dry run)
clusterrole.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterroles.rbac.authorization.k8s.io "pod-admin" not found
has:clusterroles.rbac.authorization.k8s.io "pod-admin" not found
clusterrole.rbac.authorization.k8s.io/pod-admin created
rbac.sh:42: Successful get clusterrole/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "pod-admin" deleted
... skipping 18 lines ...
clusterrole.rbac.authorization.k8s.io/url-reader created
rbac.sh:61: Successful get clusterrole/url-reader {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: get:
rbac.sh:62: Successful get clusterrole/url-reader {{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}: /logs/*:/healthz/*:
clusterrole.rbac.authorization.k8s.io/aggregation-reader created
rbac.sh:64: Successful get clusterrole/aggregation-reader {{.metadata.name}}: aggregation-reader
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin created (server dry run)
Successful
message:Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
has:clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found
clusterrolebinding.rbac.authorization.k8s.io/super-admin created
rbac.sh:77: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (dry run)
clusterrolebinding.rbac.authorization.k8s.io/super-admin subjects updated (server dry run)
rbac.sh:80: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:
... skipping 64 lines ...
rbac.sh:102: Successful get clusterrolebinding/super-admin {{range.subjects}}{{.name}}:{{end}}: super-admin:foo:test-all-user:
rbac.sh:103: Successful get clusterrolebinding/super-group {{range.subjects}}{{.name}}:{{end}}: the-group:foo:test-all-user:
rbac.sh:104: Successful get clusterrolebinding/super-sa {{range.subjects}}{{.name}}:{{end}}: sa-name:foo:test-all-user:
rolebinding.rbac.authorization.k8s.io/admin created (dry run)
rolebinding.rbac.authorization.k8s.io/admin created (server dry run)
Successful
message:Error from server (NotFound): rolebindings.rbac.authorization.k8s.io "admin" not found
has: not found
rolebinding.rbac.authorization.k8s.io/admin created
rbac.sh:113: Successful get rolebinding/admin {{.roleRef.kind}}: ClusterRole
rbac.sh:114: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:
rolebinding.rbac.authorization.k8s.io/admin subjects updated
rbac.sh:116: Successful get rolebinding/admin {{range.subjects}}{{.name}}:{{end}}: default-admin:foo:
... skipping 152 lines ...
namespace/namespace-1679080961-3134 created
Context "test" modified.
+++ [0317 19:22:41] Testing role
role.rbac.authorization.k8s.io/pod-admin created (dry run)
role.rbac.authorization.k8s.io/pod-admin created (server dry run)
Successful
message:Error from server (NotFound): roles.rbac.authorization.k8s.io "pod-admin" not found
has: not found
role.rbac.authorization.k8s.io/pod-admin created
rbac.sh:159: Successful get role/pod-admin {{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}: *:
rbac.sh:160: Successful get role/pod-admin {{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}: pods:
rbac.sh:161: Successful get role/pod-admin {{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}: :
Successful
... skipping 270 lines ...
+++ [0317 19:22:53] Creating namespace namespace-1679080973-19785
namespace/namespace-1679080973-19785 created
Context "test" modified.
core.sh:76: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/valid-pod created
W0317 19:22:53.977450 20099 cacher.go:171] Terminating all watchers from cacher examples.test.com
E0317 19:22:53.979125 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
{
    "apiVersion": "v1",
    "items": [
        {
            "apiVersion": "v1",
            "kind": "Pod",
... skipping 207 lines ...
Mounts:         <none>
Volumes:        <none>
QoS Class:      Guaranteed
Node-Selectors: <none>
Tolerations:    <none>
Events:         <none>
W0317 19:22:55.319336 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:22:55.319416 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful describe
Name:         valid-pod
Namespace:    namespace-1679080973-19785
Priority:     0
Node:         <none>
Labels:       name=valid-pod
... skipping 88 lines ...
pod "valid-pod" force deleted
core.sh:122: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/valid-pod created
core.sh:127: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
pod "valid-pod" deleted
core.sh:131: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0317 19:22:56.922884 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:22:56.922934 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/valid-pod created
core.sh:136: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
pod "valid-pod" deleted
core.sh:140: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
+++ [0317 19:22:57] Creating namespace namespace-1679080977-12076
namespace/namespace-1679080977-12076 created
... skipping 27 lines ...
has:valid-pod
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          0s
has:valid-pod
core.sh:194: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: resource(s) were provided, but no name was specified
core.sh:198: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
error: setting 'all' parameter but found a non empty selector.
core.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
core.sh:210: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
core.sh:214: Successful get pods -lname=valid-pod {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:219: Successful get namespaces {{range.items}}{{ if eq .metadata.name "test-kubectl-describe-pod" }}found{{end}}{{end}}:: :
... skipping 6 lines ...
secret/test-secret created
core.sh:235: Successful get secret/test-secret --namespace=test-kubectl-describe-pod {{.metadata.name}}: test-secret
core.sh:236: Successful get secret/test-secret --namespace=test-kubectl-describe-pod {{.type}}: test-type
core.sh:241: Successful get configmaps --namespace=test-kubectl-describe-pod {{range.items}}{{ if eq .metadata.name "test-configmap" }}found{{end}}{{end}}:: :
configmap/test-configmap created
core.sh:247: Successful get configmap/test-configmap --namespace=test-kubectl-describe-pod {{.metadata.name}}: test-configmap
W0317 19:23:01.936251 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:23:01.936306 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:251: Successful get pdb --namespace=test-kubectl-describe-pod {{range.items}}{{ if eq .metadata.name "test-pdb-1" }}found{{end}}{{end}}:: :
poddisruptionbudget.policy/test-pdb-1 created (dry run)
I0317 19:23:02.112013 20099 controller.go:624] quota admission added evaluator for: poddisruptionbudgets.policy
poddisruptionbudget.policy/test-pdb-1 created (server dry run)
core.sh:255: Successful get pdb --namespace=test-kubectl-describe-pod {{range.items}}{{ if eq .metadata.name "test-pdb-1" }}found{{end}}{{end}}:: :
poddisruptionbudget.policy/test-pdb-1 created
... skipping 12 lines ...
I0317 19:23:02.640125 28369 round_trippers.go:553] GET https://127.0.0.1:6443/apis/policy/v1/namespaces/test-kubectl-describe-pod/poddisruptionbudgets/test-pdb-2 200 OK in 1 milliseconds
I0317 19:23:02.642289 28369 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/test-kubectl-describe-pod/events?fieldSelector=involvedObject.name%3Dtest-pdb-2%2CinvolvedObject.namespace%3Dtest-kubectl-describe-pod%2CinvolvedObject.kind%3DPodDisruptionBudget%2CinvolvedObject.uid%3D58ae327b-d06e-4ac4-bef3-6685446a4992&limit=500 200 OK in 1 milliseconds
poddisruptionbudget.policy/test-pdb-3 created
core.sh:271: Successful get pdb/test-pdb-3 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 2
poddisruptionbudget.policy/test-pdb-4 created
core.sh:275: Successful get pdb/test-pdb-4 --namespace=test-kubectl-describe-pod {{.spec.maxUnavailable}}: 50%
error: min-available and max-unavailable cannot be both specified
core.sh:281: Successful get pods --namespace=test-kubectl-describe-pod {{range.items}}{{.metadata.name}}:{{end}}:
pod/env-test-pod created
matched TEST_CMD_1
matched <set to the key 'key-1' in secret 'test-secret'>
matched TEST_CMD_2
matched <set to the key 'key-2' of config map 'test-configmap'>
... skipping 141 lines ...
core.sh:395: Successful get pod valid-pod {{range.metadata.annotations}}{{.}}:{{end}}: :kubectl label pods valid-pod record-change=true --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true:
Flag --record has been deprecated, --record will be removed in the future
pod/valid-pod labeled
core.sh:402: Successful get pod valid-pod {{range.metadata.annotations}}{{.}}:{{end}}: :kubectl label pods valid-pod new-record-change=true --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true:
core.sh:407: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
W0317 19:23:14.471449 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:23:14.471490 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod "valid-pod" force deleted
I0317 19:23:14.549352 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="test-kubectl-describe-pod"
core.sh:411: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/pod-with-precision created
core.sh:419: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: pod-with-precision:
... skipping 89 lines ...
core.sh:542: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:3.9:
Successful
message:kubectl-create kubectl-patch
has:kubectl-patch
pod/valid-pod patched
core.sh:562: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: nginx:
+++ [0317 19:23:20] "kubectl patch with resourceVersion 626" returns error as expected: Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": the object has been modified; please apply your changes to the latest version and try again
pod "valid-pod" deleted
pod/valid-pod replaced
core.sh:586: Successful get pod valid-pod {{(index .spec.containers 0).name}}: replaced-k8s-serve-hostname
Successful
message:kubectl-replace
has:kubectl-replace
Successful
message:error: --grace-period must have --force specified
has:\-\-grace-period must have \-\-force specified
Successful
message:error: --timeout must have --force specified
has:\-\-timeout must have \-\-force specified
node/node-v1-test created
I0317 19:23:22.080136 23215 actual_state_of_world.go:547] "Failed to update statusUpdateNeeded field in actual state of world" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"node-v1-test\" does not exist"
core.sh:614: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
node/node-v1-test replaced (server dry run)
node/node-v1-test replaced (dry run)
core.sh:639: Successful get node node-v1-test {{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:: :
I0317 19:23:22.774868 23215 event.go:307] "Event occurred" object="node-v1-test" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node node-v1-test event: Registered Node node-v1-test in Controller"
I0317 19:23:22.901535 23215 shared_informer.go:311] Waiting for caches to sync for resource quota
... skipping 32 lines ...
spec:
  containers:
  - image: registry.k8s.io/pause:3.9
    name: kubernetes-pause
has:localonlyvalue
core.sh:691: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
error: 'name' already has a value (valid-pod), and --overwrite is false
core.sh:695: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
core.sh:699: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod
pod/valid-pod labeled
core.sh:703: Successful get pod valid-pod {{.metadata.labels.name}}: valid-pod-super-sayan
core.sh:707: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
... skipping 40 lines ...
pod/test-pod created
pod "test-pod" deleted
+++ [0317 19:23:29] Creating namespace namespace-1679081009-11821
namespace/namespace-1679081009-11821 created
Context "test" modified.
save-config.sh:41: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
W0317 19:23:30.244904 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:23:30.244946 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/test-pod created
pod/test-pod edited
pod "test-pod" deleted
+++ [0317 19:23:31] Creating namespace namespace-1679081011-22266
namespace/namespace-1679081011-22266 created
Context "test" modified.
... skipping 33 lines ...
+++ Running case: test-cmd.run_kubectl_create_error_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_create_error_tests
+++ [0317 19:23:33] Creating namespace namespace-1679081013-800
namespace/namespace-1679081013-800 created
Context "test" modified.
+++ [0317 19:23:34] Testing kubectl create with error
Error: must specify one of -f and -k

Create a resource from a file or from stdin.

 JSON and YAML formats are accepted.

Examples:
... skipping 63 lines ...
	If true, keep the managedFields when printing objects in JSON or YAML format.

    --template='':
	Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].

    --validate='strict':
	Must be one of: strict (or true), warn, ignore (or false). "true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not. "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise. "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.

    --windows-line-endings=false:
	Only relevant if --edit=true. Defaults to the line ending native to your platform.

Usage:
  kubectl create -f FILENAME [options]
... skipping 38 lines ...
I0317 19:23:37.520198 23215 event.go:307] "Event occurred" object="namespace-1679081014-3081/test-deployment-retainkeys-d65c44c97" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-deployment-retainkeys-d65c44c97-gdcxs"
deployment.apps "test-deployment-retainkeys" deleted
apply.sh:88: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
apply.sh:92: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
apply.sh:101: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/test-pod created (dry run)
pod/test-pod created (server dry run)
apply.sh:107: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
... skipping 31 lines ...
pod/b created
apply.sh:207: Successful get pods a {{.metadata.name}}: a
apply.sh:208: Successful get pods b -n nsb {{.metadata.name}}: b
pod "a" deleted
pod "b" deleted
Successful
message:error: all resources selected for prune without explicitly passing --all. To prune all resources, pass the --all flag. If you did not mean to prune all resources, specify a label selector
has:all resources selected for prune without explicitly passing --all
pod/a created
pod/b created
I0317 19:23:47.421725 20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081014-3081/prune-svc" clusterIPs=map[IPv4:10.0.0.181]
service/prune-svc created
W0317 19:23:47.422624 32461 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
... skipping 44 lines ...
pod/b unchanged
W0317 19:24:06.020961 32832 prune.go:71] Deprecated: kubectl apply will no longer prune non-namespaced resources by default when used with the --namespace flag in a future release. To preserve the current behaviour, list the resources you want to target explicitly in the --prune-allowlist flag.
pod/a pruned
apply.sh:265: Successful get pods -n nsb {{range.items}}{{.metadata.name}}:{{end}}: b:
namespace "nsb" deleted
Successful
message:error: the namespace from the provided object "nsb" does not match the namespace "foo". You must pass '--namespace=nsb' to perform this operation.
has:the namespace from the provided object "nsb" does not match the namespace "foo".
apply.sh:276: Successful get services {{range.items}}{{.metadata.name}}:{{end}}:
service/a created
apply.sh:280: Successful get services a {{.metadata.name}}: a
Successful
message:The Service "a" is invalid: spec.clusterIPs[0]: Invalid value: []string{"10.0.0.12"}: may not change once set
... skipping 28 lines ...
apply.sh:302: Successful get deployment test-the-deployment {{.metadata.name}}: test-the-deployment
apply.sh:303: Successful get service test-the-service {{.metadata.name}}: test-the-service
configmap "test-the-map" deleted
service "test-the-service" deleted
deployment.apps "test-the-deployment" deleted
Successful
message:Error from server (NotFound): namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
apply.sh:311: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:namespace/multi-resource-ns created
Error from server (NotFound): error when creating "hack/testdata/multi-resource-1.yaml": namespaces "multi-resource-ns" not found
has:namespaces "multi-resource-ns" not found
Successful
message:Error from server (NotFound): pods "test-pod" not found
has:pods "test-pod" not found
pod/test-pod created
namespace/multi-resource-ns unchanged
apply.sh:319: Successful get pods test-pod -n multi-resource-ns {{.metadata.name}}: test-pod
pod "test-pod" deleted
namespace "multi-resource-ns" deleted
W0317 19:24:17.338599 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:24:17.338654 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:24:17.768277 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="nsb"
apply.sh:325: Successful get configmaps --field-selector=metadata.name=foo {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:configmap/foo created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-2.yaml": no matches for kind "Bogus" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Bogus" in version "example.com/v1"
apply.sh:331: Successful get configmaps foo {{.metadata.name}}: foo
configmap "foo" deleted
apply.sh:337: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
... skipping 6 lines ...
pod "pod-a" deleted
pod "pod-c" deleted
apply.sh:345: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
apply.sh:349: Successful get crds {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:customresourcedefinition.apiextensions.k8s.io/widgets.example.com created
error: resource mapping not found for name: "foo" namespace: "" from "hack/testdata/multi-resource-4.yaml": no matches for kind "Widget" in version "example.com/v1"
ensure CRDs are installed first
has:no matches for kind "Widget" in version "example.com/v1"
I0317 19:24:24.105782 20099 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io/widgets.example.com condition met
Successful
message:Error from server (NotFound): widgets.example.com "foo" not found
has:widgets.example.com "foo" not found
apply.sh:356: Successful get crds widgets.example.com {{.metadata.name}}: widgets.example.com
I0317 19:24:27.049395 20099 controller.go:624] quota admission added evaluator for: widgets.example.com
widget.example.com/foo created
customresourcedefinition.apiextensions.k8s.io/widgets.example.com unchanged
apply.sh:359: Successful get widget foo {{.metadata.name}}: foo
... skipping 34 lines ...
message:911
has:911
pod "test-pod" deleted
apply.sh:415: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
+++ [0317 19:24:30] Testing upgrade kubectl client-side apply to server-side apply
pod/test-pod created
error: Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using v1: .metadata.labels.name
Please review the fields above--they currently have other managers. Here
are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply
  command with the `--force-conflicts` flag.
* If you do not intend to manage all of the fields, please edit your
  manifest to remove references to the fields that should keep their
... skipping 153 lines ...
pod "nginx-extensions" deleted
Successful
message:pod/test1 created
has:pod/test1 created
pod "test1" deleted
Successful
message:error: Invalid image name "InvalidImageName": invalid reference format
has:error: Invalid image name "InvalidImageName": invalid reference format
+++ exit code: 0
Recording: run_kubectl_create_filter_tests
Running command: run_kubectl_create_filter_tests

+++ Running case: test-cmd.run_kubectl_create_filter_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 3 lines ...
Context "test" modified.
+++ [0317 19:24:38] Testing kubectl create filter
create.sh:50: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/selector-test-pod created
create.sh:54: Successful get pods selector-test-pod {{.metadata.labels.name}}: selector-test-pod
Successful
message:Error from server (NotFound): pods "selector-test-pod-dont-apply" not found
has:pods "selector-test-pod-dont-apply" not found
pod "selector-test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_apply_deployments_tests
Running command: run_kubectl_apply_deployments_tests
... skipping 29 lines ...
I0317 19:24:42.005259 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5645b79496 to 3"
I0317 19:24:42.031828 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-f7j62"
I0317 19:24:42.085095 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-9zvpc"
I0317 19:24:42.085130 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5645b79496" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5645b79496-6f7rz"
apps.sh:183: Successful get deployment nginx {{.metadata.name}}: nginx
Successful
message:Error from server (Conflict): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"name\":\"nginx\"},\"name\":\"nginx\",\"namespace\":\"namespace-1679081079-15257\",\"resourceVersion\":\"99\"},\"spec\":{\"replicas\":3,\"selector\":{\"matchLabels\":{\"name\":\"nginx2\"}},\"template\":{\"metadata\":{\"labels\":{\"name\":\"nginx2\"}},\"spec\":{\"containers\":[{\"image\":\"registry.k8s.io/nginx:test-cmd\",\"name\":\"nginx\",\"ports\":[{\"containerPort\":80}]}]}}}}\n"},"resourceVersion":"99"},"spec":{"selector":{"matchLabels":{"name":"nginx2"}},"template":{"metadata":{"labels":{"name":"nginx2"}}}}}
to:
Resource: "apps/v1, Resource=deployments", GroupVersionKind: "apps/v1, Kind=Deployment"
Name: "nginx", Namespace: "namespace-1679081079-15257"
for: "hack/testdata/deployment-label-change2.yaml": error when patching "hack/testdata/deployment-label-change2.yaml": Operation cannot be fulfilled on deployments.apps "nginx": the object has been modified; please apply your changes to the latest version and try again
has:Error from server (Conflict)
W0317 19:24:47.475151 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:24:47.475190 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
deployment.apps/nginx configured
I0317 19:24:50.756429 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5675dfc785 to 3"
I0317 19:24:50.818251 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-scpj4"
Successful
message: "name": "nginx2"
"name": "nginx2"
... skipping 6 lines ...
I0317 19:24:55.149318 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-5675dfc785 to 3"
I0317 19:24:55.209662 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-hsf7m"
apps.sh:203: Successful get deployment nginx {{.spec.template.metadata.labels.name}}: nginx2
I0317 19:24:55.236513 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-k2nbk"
I0317 19:24:55.236556 23215 event.go:307] "Event occurred" object="namespace-1679081079-15257/nginx-5675dfc785" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-5675dfc785-2g9jj"
deployment.apps "nginx" deleted
E0317 19:24:55.348342 23215 replica_set.go:544] sync "namespace-1679081079-15257/nginx-5675dfc785" failed with replicasets.apps "nginx-5675dfc785" not found
+++ exit code: 0
Recording: run_kubectl_diff_tests
Running command: run_kubectl_diff_tests

+++ Running case: test-cmd.run_kubectl_diff_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 520 lines ...
+++ [0317 19:25:16] Creating namespace namespace-1679081116-7670
namespace/namespace-1679081116-7670 created
Context "test" modified.
+++ [0317 19:25:16] Testing kubectl get
get.sh:29: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:37: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
get.sh:45: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:{
    "apiVersion": "v1",
    "items": [],
... skipping 21 lines ...
has not:No resources found
Successful
message:NAME
has not:No resources found
get.sh:73: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:error: the server doesn't have a resource type "foobar"
has not:No resources found
Successful
message:No resources found in namespace-1679081116-7670 namespace.
has:No resources found
Successful
message:
has not:No resources found
Successful
message:No resources found in namespace-1679081116-7670 namespace.
has:No resources found
get.sh:93: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
Successful
message:Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:Error from server (NotFound): pods "abc" not found
has not:List
Successful
message:I0317 19:25:18.100574 36084 loader.go:373] Config loaded from file: /tmp/tmp.gjLxJw0fxv/.kube/config
I0317 19:25:18.106448 36084 round_trippers.go:553] GET https://127.0.0.1:6443/version?timeout=32s 200 OK in 5 milliseconds
I0317 19:25:18.124585 36084 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/pods 200 OK in 1 milliseconds
I0317 19:25:18.126416 36084 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/default/replicationcontrollers 200 OK in 1 milliseconds
... skipping 597 lines ...
}
get.sh:158: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
<no value>
Successful
message:valid-pod:
has:valid-pod:
Successful
message:error: error executing jsonpath "{.missing}": Error executing template: missing is not found. Printing more information for debugging the template:
	template was:
		{.missing}
	object given to jsonpath engine was:
		map[string]interface {}{"apiVersion":"v1", "kind":"Pod", "metadata":map[string]interface {}{"creationTimestamp":"2023-03-17T19:25:25Z", "labels":map[string]interface {}{"name":"valid-pod"}, "managedFields":[]interface {}{map[string]interface {}{"apiVersion":"v1", "fieldsType":"FieldsV1", "fieldsV1":map[string]interface {}{"f:metadata":map[string]interface {}{"f:labels":map[string]interface {}{".":map[string]interface {}{}, "f:name":map[string]interface {}{}}}, "f:spec":map[string]interface {}{"f:containers":map[string]interface {}{"k:{\"name\":\"kubernetes-serve-hostname\"}":map[string]interface {}{".":map[string]interface {}{}, "f:image":map[string]interface {}{}, "f:imagePullPolicy":map[string]interface {}{}, "f:name":map[string]interface {}{}, "f:resources":map[string]interface {}{".":map[string]interface {}{}, "f:limits":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}, "f:requests":map[string]interface {}{".":map[string]interface {}{}, "f:cpu":map[string]interface {}{}, "f:memory":map[string]interface {}{}}}, "f:terminationMessagePath":map[string]interface {}{}, "f:terminationMessagePolicy":map[string]interface {}{}}}, "f:dnsPolicy":map[string]interface {}{}, "f:enableServiceLinks":map[string]interface {}{}, "f:restartPolicy":map[string]interface {}{}, "f:schedulerName":map[string]interface {}{}, "f:securityContext":map[string]interface {}{}, "f:terminationGracePeriodSeconds":map[string]interface {}{}}}, "manager":"kubectl-create", "operation":"Update", "time":"2023-03-17T19:25:25Z"}}, "name":"valid-pod", "namespace":"namespace-1679081125-2999", "resourceVersion":"1146", "uid":"f1f00c0b-3b04-419e-a1e8-25daf5f48b4d"}, "spec":map[string]interface {}{"containers":[]interface {}{map[string]interface {}{"image":"registry.k8s.io/serve_hostname", "imagePullPolicy":"Always", "name":"kubernetes-serve-hostname", "resources":map[string]interface {}{"limits":map[string]interface {}{"cpu":"1", "memory":"512Mi"}, "requests":map[string]interface {}{"cpu":"1", "memory":"512Mi"}}, "terminationMessagePath":"/dev/termination-log", "terminationMessagePolicy":"File"}}, "dnsPolicy":"ClusterFirst", "enableServiceLinks":true, "preemptionPolicy":"PreemptLowerPriority", "priority":0, "restartPolicy":"Always", "schedulerName":"default-scheduler", "securityContext":map[string]interface {}{}, "terminationGracePeriodSeconds":30}, "status":map[string]interface {}{"phase":"Pending", "qosClass":"Guaranteed"}}
has:missing is not found
error: error executing template "{{.missing}}": template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing"
Successful
message:Error executing template: template: output:1:2: executing "output" at <.missing>: map has no entry for key "missing". Printing more information for debugging the template:
	template was:
		{{.missing}}
	raw data was:
		{"apiVersion":"v1","kind":"Pod","metadata":{"creationTimestamp":"2023-03-17T19:25:25Z","labels":{"name":"valid-pod"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"kubernetes-serve-hostname\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{".":{},"f:limits":{".":{},"f:cpu":{},"f:memory":{}},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}},"manager":"kubectl-create","operation":"Update","time":"2023-03-17T19:25:25Z"}],"name":"valid-pod","namespace":"namespace-1679081125-2999","resourceVersion":"1146","uid":"f1f00c0b-3b04-419e-a1e8-25daf5f48b4d"},"spec":{"containers":[{"image":"registry.k8s.io/serve_hostname","imagePullPolicy":"Always","name":"kubernetes-serve-hostname","resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"1","memory":"512Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority","priority":0,"restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30},"status":{"phase":"Pending","qosClass":"Guaranteed"}}
	object given to template engine was:
		map[apiVersion:v1 kind:Pod metadata:map[creationTimestamp:2023-03-17T19:25:25Z labels:map[name:valid-pod] managedFields:[map[apiVersion:v1 fieldsType:FieldsV1 fieldsV1:map[f:metadata:map[f:labels:map[.:map[] f:name:map[]]] f:spec:map[f:containers:map[k:{"name":"kubernetes-serve-hostname"}:map[.:map[] f:image:map[] f:imagePullPolicy:map[] f:name:map[] f:resources:map[.:map[] f:limits:map[.:map[] f:cpu:map[] f:memory:map[]] f:requests:map[.:map[] f:cpu:map[] f:memory:map[]]] f:terminationMessagePath:map[] f:terminationMessagePolicy:map[]]] f:dnsPolicy:map[] f:enableServiceLinks:map[] f:restartPolicy:map[] f:schedulerName:map[] f:securityContext:map[] f:terminationGracePeriodSeconds:map[]]] manager:kubectl-create operation:Update time:2023-03-17T19:25:25Z]] name:valid-pod namespace:namespace-1679081125-2999 resourceVersion:1146 uid:f1f00c0b-3b04-419e-a1e8-25daf5f48b4d] spec:map[containers:[map[image:registry.k8s.io/serve_hostname imagePullPolicy:Always name:kubernetes-serve-hostname resources:map[limits:map[cpu:1 memory:512Mi] requests:map[cpu:1 memory:512Mi]] terminationMessagePath:/dev/termination-log terminationMessagePolicy:File]] dnsPolicy:ClusterFirst enableServiceLinks:true preemptionPolicy:PreemptLowerPriority priority:0 restartPolicy:Always schedulerName:default-scheduler securityContext:map[] terminationGracePeriodSeconds:30] status:map[phase:Pending qosClass:Guaranteed]]
has:map has no entry for key "missing"
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
Successful
message:Error from server (NotFound): the server could not find the requested resource
has:the server could not find the requested resource
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:STATUS
Successful
... skipping 78 lines ...
  terminationGracePeriodSeconds: 30
status:
  phase: Pending
  qosClass: Guaranteed
has:name: valid-pod
Successful
message:Error from server (NotFound): pods "invalid-pod" not found
has:"invalid-pod" not found
pod "valid-pod" deleted
get.sh:204: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
pod/redis-master created
pod/valid-pod created
Successful
message:redis-master valid-pod
has:redis-master valid-pod
pod "redis-master" deleted
pod "valid-pod" deleted
get.sh:218: Successful get configmaps --field-selector=metadata.name=test-the-map {{range.items}}{{.metadata.name}}:{{end}}:
get.sh:219: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
get.sh:220: Successful get services {{range.items}}{{.metadata.name}}:{{end}}:
W0317 19:25:31.040852 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:25:31.040900 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
configmap/test-the-map created
I0317 19:25:31.478986 20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081125-2999/test-the-service" clusterIPs=map[IPv4:10.0.0.246]
service/test-the-service created
deployment.apps/test-the-deployment created
I0317 19:25:31.537777 23215 event.go:307] "Event occurred" object="namespace-1679081125-2999/test-the-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-the-deployment-6ccf78d7dd to 3"
I0317 19:25:31.574074 23215 event.go:307] "Event occurred" object="namespace-1679081125-2999/test-the-deployment-6ccf78d7dd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-the-deployment-6ccf78d7dd-9jsxc"
... skipping 1123 lines ...
+++ [0317 19:25:41] Creating namespace namespace-1679081141-18095
namespace/namespace-1679081141-18095 created
Context "test" modified.
+++ [0317 19:25:41] Testing kubectl exec POD COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): pods "abc" not found
has:pods "abc" not found
Successful
message:error: cannot exec into multiple objects at a time
has:cannot exec into multiple objects at a time
pod/test-pod created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pods "test-pod" not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod or type/name must be specified
pod "test-pod" deleted
+++ exit code: 0
Recording: run_kubectl_exec_resource_name_tests
Running command: run_kubectl_exec_resource_name_tests
... skipping 3 lines ...
+++ [0317 19:25:41] Creating namespace namespace-1679081141-20923
namespace/namespace-1679081141-20923 created
Context "test" modified.
+++ [0317 19:25:41] Testing kubectl exec TYPE/NAME COMMAND
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: the server doesn't have a resource type "foo"
has:error:
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (NotFound): deployments.apps "bar" not found
has:"bar" not found
pod/test-pod created
replicaset.apps/frontend created
I0317 19:25:42.614621   23215 event.go:307] "Event occurred" object="namespace-1679081141-20923/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-rhnvk"
I0317 19:25:42.667281   23215 event.go:307] "Event occurred" object="namespace-1679081141-20923/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-7b7cg"
I0317 19:25:42.667316   23215 event.go:307] "Event occurred" object="namespace-1679081141-20923/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-5j45c"
configmap/test-set-env-config created
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
error: cannot attach to *v1.ConfigMap: selector for *v1.ConfigMap not implemented
has:not implemented
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod test-pod does not have a host assigned
has not:pod, type/name or --filename must be specified
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-5j45c does not have a host assigned
has not:not found
Successful
message:kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Error from server (BadRequest): pod frontend-5j45c does not have a host assigned
has not:pod, type/name or --filename must be specified
pod "test-pod" deleted
replicaset.apps "frontend" deleted
configmap "test-set-env-config" deleted
+++ exit code: 0
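Both exec suites above exercise the deprecated argument form. A minimal sketch of the invocations under test (pod and deployment names are the fixtures created above; -- separates kubectl's own flags from the remote command in the preferred form):

  kubectl exec test-pod date            # legacy form, prints the deprecation warning
  kubectl exec test-pod -- date         # preferred form with an explicit separator
  kubectl exec deployment/bar -- date   # TYPE/NAME form resolves the controller to one of its pods first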
Recording: run_create_secret_tests
Running command: run_create_secret_tests
+++ Running case: test-cmd.run_create_secret_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_create_secret_tests
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
message:user-specified
has:user-specified
Successful
message:Error from server (NotFound): secrets "mysecret" not found
has:secrets "mysecret" not found
Successful
{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"85a69c81-2f00-43a5-9ee2-04161888cfca","resourceVersion":"1247","creationTimestamp":"2023-03-17T19:25:43Z"}}
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"85a69c81-2f00-43a5-9ee2-04161888cfca","resourceVersion":"1248","creationTimestamp":"2023-03-17T19:25:43Z"},"data":{"key1":"config1"}}
has:uid
Successful
message:{"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"tester-update-cm","namespace":"default","uid":"85a69c81-2f00-43a5-9ee2-04161888cfca","resourceVersion":"1248","creationTimestamp":"2023-03-17T19:25:43Z"},"data":{"key1":"config1"}}
has:config1
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Success","details":{"name":"tester-update-cm","kind":"configmaps","uid":"85a69c81-2f00-43a5-9ee2-04161888cfca"}}
Successful
message:Error from server (NotFound): configmaps "tester-update-cm" not found
has:configmaps "tester-update-cm" not found
+++ exit code: 0
Recording: run_kubectl_create_kustomization_directory_tests
Running command: run_kubectl_create_kustomization_directory_tests
+++ Running case: test-cmd.run_kubectl_create_kustomization_directory_tests
... skipping 25 lines ...
+++ command: run_kubectl_create_validate_tests
+++ [0317 19:25:45] Creating namespace namespace-1679081145-466
namespace/namespace-1679081145-466 created
Context "test" modified.
+++ [0317 19:25:45] Testing kubectl create --validate
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0317 19:25:45] Testing kubectl create --validate=true
I0317 19:25:45.923603   23215 namespace_controller.go:182] "Namespace has been deleted" namespace="test-events"
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0317 19:25:46] Testing kubectl create --validate=false
Successful
message:deployment.apps/invalid-nginx-deployment created
has:deployment.apps/invalid-nginx-deployment created
I0317 19:25:46.217785   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4"
I0317 19:25:46.252357   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-zqqdm"
deployment.apps "invalid-nginx-deployment" deleted
I0317 19:25:46.276162   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-r24d5"
I0317 19:25:46.276194   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-wsnzs"
+++ [0317 19:25:46] Testing kubectl create --validate=strict
I0317 19:25:46.302925   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-x7kbm"
E0317 19:25:46.316313   23215 replica_set.go:544] sync "namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" failed with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0317 19:25:46] Testing kubectl create --validate=warn
Warning: unknown field "spec.baz"
Warning: unknown field "spec.foo"
Successful
message:deployment.apps/invalid-nginx-deployment created
has:deployment.apps/invalid-nginx-deployment created
I0317 19:25:46.747582   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4"
I0317 19:25:46.774139   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-dt6g2"
I0317 19:25:46.799290   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-zt26j"
I0317 19:25:46.799340   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-m5hqp"
I0317 19:25:46.840742   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-6vmcg"
deployment.apps "invalid-nginx-deployment" deleted
+++ [0317 19:25:46] Testing kubectl create --validate=ignore
E0317 19:25:46.900817   23215 replica_set.go:544] sync "namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" failed with Operation cannot be fulfilled on replicasets.apps "invalid-nginx-deployment-cbdccf466": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679081145-466/invalid-nginx-deployment-cbdccf466, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: a8f7df4c-0370-439f-9d6f-cb7da7ab52ee, UID in object meta:
Successful
message:deployment.apps/invalid-nginx-deployment created
has:deployment.apps/invalid-nginx-deployment created
I0317 19:25:47.014084   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set invalid-nginx-deployment-cbdccf466 to 4"
I0317 19:25:47.040499   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-z5skl"
I0317 19:25:47.067606   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-tvzbv"
I0317 19:25:47.067897   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-qsqw4"
deployment.apps "invalid-nginx-deployment" deleted
I0317 19:25:47.118646   23215 event.go:307] "Event occurred" object="namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: invalid-nginx-deployment-cbdccf466-fxngd"
+++ [0317 19:25:47] Testing kubectl create
E0317 19:25:47.159904   23215 replica_set.go:544] sync "namespace-1679081145-466/invalid-nginx-deployment-cbdccf466" failed with replicasets.apps "invalid-nginx-deployment-cbdccf466" not found
Successful
message:Error from server (BadRequest): error when creating "hack/testdata/invalid-deployment-unknown-and-duplicate-fields.yaml": Deployment in version "v1" cannot be handled as a Deployment: strict decoding error: unknown field "spec.baz", unknown field "spec.foo"
has either:strict decoding error
or:error validating data
+++ [0317 19:25:47] Testing kubectl create --validate=foo
Successful
message:error: invalid - validate option "foo"; must be one of: strict (or true), warn, ignore (or false)
has:invalid - validate option "foo"
+++ exit code: 0
Recording: run_convert_tests
Running command: run_convert_tests
+++ Running case: test-cmd.run_convert_tests
... skipping 50 lines ...
  securityContext: {}
  terminationGracePeriodSeconds: 30
status: {}
has:apps/v1beta1
deployment.apps "nginx" deleted
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
Successful
message:nginx:
has:nginx:
+++ exit code: 0
Recording: run_kubectl_delete_allnamespaces_tests
... skipping 103 lines ...
has:Timeout
Successful
message:NAME        READY   STATUS    RESTARTS   AGE
valid-pod   0/1     Pending   0          1s
has:valid-pod
Successful
message:error: Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)
has:Invalid timeout value
pod "valid-pod" deleted
+++ exit code: 0
Recording: run_crd_tests
Running command: run_crd_tests
... skipping 164 lines ...
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:296: Successful get foos/test {{.patched}}: value2
Flag --record has been deprecated, --record will be removed in the future
foo.company.com/test patched
crd.sh:298: Successful get foos/test {{.patched}}: <no value>
+++ [0317 19:25:58] "kubectl patch --local" returns error as expected for CustomResource: error: strategic merge patch is not supported for company.com/v1, Kind=Foo locally, try --type merge
{
  "apiVersion": "company.com/v1",
  "kind": "Foo",
  "metadata": {
    "annotations": {
      "kubernetes.io/change-cause": "kubectl patch foos/test --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true --patch={\"patched\":null} --type=merge --record=true"
... skipping 217 lines ...
crd.sh:519: Successful get bars {{range.items}}{{.metadata.name}}:{{end}}: 
namespace/non-native-resources created
bar.company.com/test created
crd.sh:524: Successful get bars {{len .items}}: 1
namespace "non-native-resources" deleted
crd.sh:527: Successful get bars {{len .items}}: 0
Error from server (NotFound): namespaces "non-native-resources" not found
I0317 19:26:13.647192   20099 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io "foos.company.com" deleted
I0317 19:26:13.666898   20099 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager
I0317 19:26:13.714062   20099 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager
I0317 19:26:13.834917   20099 handler.go:165] Adding GroupVersion company.com v1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io "bars.company.com" deleted
... skipping 15 lines ...
+++ [0317 19:26:14] Testing recursive resources
+++ [0317 19:26:14] Creating namespace namespace-1679081174-30114
namespace/namespace-1679081174-30114 created
Context "test" modified.
generic-resources.sh:202: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
W0317 19:26:14.714447   20099 cacher.go:171] Terminating all watchers from cacher foos.company.com
E0317 19:26:14.715912   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
W0317 19:26:14.914117   20099 cacher.go:171] Terminating all watchers from cacher bars.company.com
E0317 19:26:14.915752   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:206: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:pod/busybox0 created
pod/busybox1 created
error: error validating "hack/testdata/recursive/pod/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
W0317 19:26:15.561340   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:15.561382   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:211: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:220: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: busybox:busybox:
Successful
message:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:227: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
W0317 19:26:15.987788   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:15.987826   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0317 19:26:16.022366   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:16.022412   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:231: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:pod/busybox0 replaced
pod/busybox1 replaced
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:236: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:Name:         busybox0
Namespace:    namespace-1679081174-30114
Priority:     0
Node:         <none>
... skipping 159 lines ...
has:Object 'Kind' is missing
generic-resources.sh:246: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:250: Successful get pods {{range.items}}{{.metadata.annotations.annotatekey}}:{{end}}: annotatevalue:annotatevalue:
Successful
message:pod/busybox0 annotate
pod/busybox1 annotate
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:255: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:259: Successful get pods {{range.items}}{{.metadata.labels.status}}:{{end}}: replaced:replaced:
Successful
message:Warning: resource pods/busybox0 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox0 configured
Warning: resource pods/busybox1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
pod/busybox1 configured
error: error validating "hack/testdata/recursive/pod-modify/pod/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
has:error validating data: kind not set
generic-resources.sh:264: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:busybox0:busybox1:
Successful
message:busybox0:busybox1:error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:273: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
generic-resources.sh:278: Successful get pods {{range.items}}{{.metadata.labels.mylabel}}:{{end}}: myvalue:myvalue:
Successful
message:pod/busybox0 labeled
pod/busybox1 labeled
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:283: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
W0317 19:26:17.785672   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:17.785715   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:288: Successful get pods {{range.items}}{{(index .spec.containers 0).image}}:{{end}}: prom/busybox:prom/busybox:
Successful
message:pod/busybox0 patched
pod/busybox1 patched
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:293: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:297: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: 
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "busybox0" force deleted
pod "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/pod/pod/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"Pod","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}'
has:Object 'Kind' is missing
generic-resources.sh:302: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
I0317 19:26:18.386890   23215 namespace_controller.go:182] "Namespace has been deleted" namespace="non-native-resources"
replicationcontroller/busybox0 created
I0317 19:26:18.559500   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-c5876"
replicationcontroller/busybox1 created
error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0317 19:26:18.652031   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-tcfvx"
generic-resources.sh:306: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:311: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:312: Successful get rc busybox0 {{.spec.replicas}}: 1
W0317 19:26:18.911016   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:18.911065   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
generic-resources.sh:313: Successful get rc busybox1 {{.spec.replicas}}: 1
generic-resources.sh:318: Successful get hpa busybox0 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
generic-resources.sh:319: Successful get hpa busybox1 {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 80
Successful
message:horizontalpodautoscaler.autoscaling/busybox0 autoscaled
horizontalpodautoscaler.autoscaling/busybox1 autoscaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
horizontalpodautoscaler.autoscaling "busybox0" deleted
horizontalpodautoscaler.autoscaling "busybox1" deleted
generic-resources.sh:327: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:328: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:329: Successful get rc busybox1 {{.spec.replicas}}: 1
I0317 19:26:19.711143   20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081174-30114/busybox0" clusterIPs=map[IPv4:10.0.0.169]
I0317 19:26:19.762826   20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081174-30114/busybox1" clusterIPs=map[IPv4:10.0.0.152]
generic-resources.sh:333: Successful get service busybox0 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
generic-resources.sh:334: Successful get service busybox1 {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80
Successful
message:service/busybox0 exposed
service/busybox1 exposed
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:340: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:341: Successful get rc busybox0 {{.spec.replicas}}: 1
generic-resources.sh:342: Successful get rc busybox1 {{.spec.replicas}}: 1
I0317 19:26:20.277298   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-78kgg"
I0317 19:26:20.312248   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-pmlzr"
generic-resources.sh:346: Successful get rc busybox0 {{.spec.replicas}}: 2
generic-resources.sh:347: Successful get rc busybox1 {{.spec.replicas}}: 2
Successful
message:replicationcontroller/busybox0 scaled
replicationcontroller/busybox1 scaled
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:352: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1:
generic-resources.sh:356: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: 
Successful
message:Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
replicationcontroller "busybox0" force deleted
replicationcontroller "busybox1" force deleted
error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}'
has:Object 'Kind' is missing
generic-resources.sh:361: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: 
deployment.apps/nginx1-deployment created
I0317 19:26:21.116493   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx1-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx1-deployment-69c599568 to 2"
deployment.apps/nginx0-deployment created
error: error validating "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false
I0317 19:26:21.144006   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-kz4q4"
I0317 19:26:21.201198   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx0-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx0-deployment-5944978c6f to 2"
I0317 19:26:21.201388   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx1-deployment-69c599568" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx1-deployment-69c599568-vkpcn"
I0317 19:26:21.233918   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx0-deployment-5944978c6f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-w7bww"
generic-resources.sh:365: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx0-deployment:nginx1-deployment:
I0317 19:26:21.264882   23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/nginx0-deployment-5944978c6f" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx0-deployment-5944978c6f-tx7jj"
generic-resources.sh:366: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
generic-resources.sh:370: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:registry.k8s.io/nginx:1.7.9:
Successful
message:deployment.apps/nginx1-deployment skipped rollback (current template already matches revision 1)
deployment.apps/nginx0-deployment skipped rollback (current template already matches revision 1)
error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
deployment.apps/nginx1-deployment paused
deployment.apps/nginx0-deployment paused
generic-resources.sh:378: Successful get deployment {{range.items}}{{.spec.paused}}:{{end}}: true:true:
Successful
message:unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
... skipping 13 lines ...
has:Waiting for deployment "nginx1-deployment" rollout to finish
Successful
message:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available...
timed out waiting for the condition
unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
W0317 19:26:23.330958   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:23.330999   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:26:23.359984   23215 shared_informer.go:311] Waiting for caches to sync for garbage collector
I0317 19:26:23.360037   23215 shared_informer.go:318] Caches are synced for garbage collector
W0317 19:26:24.577836   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:24.577881   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:Waiting for deployment "nginx1-deployment" rollout to finish: 0 of 2 updated replicas are available...
Waiting for deployment "nginx0-deployment" rollout to finish: 0 of 2 updated replicas are available...
timed out waiting for the condition
timed out waiting for the condition
unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
... skipping 18 lines ...
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx0-deployment
Successful
message:deployment.apps/nginx1-deployment 
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:nginx1-deployment
Successful
message:deployment.apps/nginx1-deployment 
REVISION  CHANGE-CAUSE
1         <none>

deployment.apps/nginx0-deployment 
REVISION  CHANGE-CAUSE
1         <none>

error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}'
has:Object 'Kind' is missing
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
deployment.apps "nginx1-deployment" force deleted deployment.apps "nginx0-deployment" force deleted error: unable to decode "hack/testdata/recursive/deployment/deployment/nginx-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"apps/v1","ind":"Deployment","metadata":{"labels":{"app":"nginx2-deployment"},"name":"nginx2-deployment"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"registry.k8s.io/nginx:1.7.9","name":"nginx","ports":[{"containerPort":80}]}]}}}}' [32mgeneric-resources.sh:411: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: (B[mreplicationcontroller/busybox0 created I0317 19:26:26.692333 23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox0" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox0-bk7r5" replicationcontroller/busybox1 created error: error validating "hack/testdata/recursive/rc/rc/busybox-broken.yaml": error validating data: kind not set; if you choose to ignore these errors, turn validation off with --validate=false I0317 19:26:26.743703 23215 event.go:307] "Event occurred" object="namespace-1679081174-30114/busybox1" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: busybox1-lddd8" [32mgeneric-resources.sh:415: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: busybox0:busybox1: (B[m[32mSuccessful (B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' ... skipping 2 lines ... 
(B[mmessage:no rollbacker has been implemented for "ReplicationController" no rollbacker has been implemented for "ReplicationController" unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox0" pausing is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" pausing is not supported error: replicationcontrollers "busybox1" pausing is not supported has:replicationcontrollers "busybox1" pausing is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:Object 'Kind' is missing [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in 
'{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox0" resuming is not supported [32mSuccessful (B[mmessage:unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' error: replicationcontrollers "busybox0" resuming is not supported error: replicationcontrollers "busybox1" resuming is not supported has:replicationcontrollers "busybox1" resuming is not supported Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. replicationcontroller "busybox0" force deleted replicationcontroller "busybox1" force deleted error: unable to decode "hack/testdata/recursive/rc/rc/busybox-broken.yaml": Object 'Kind' is missing in '{"apiVersion":"v1","ind":"ReplicationController","metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"replicas":1,"selector":{"app":"busybox2"},"template":{"metadata":{"labels":{"app":"busybox2"},"name":"busybox2"},"spec":{"containers":[{"command":["sleep","3600"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"busybox"}],"restartPolicy":"Always"}}}}' +++ exit code: 0 Recording: run_namespace_tests Running command: run_namespace_tests +++ Running case: test-cmd.run_namespace_tests +++ working dir: /home/prow/go/src/k8s.io/kubernetes +++ command: run_namespace_tests +++ [0317 19:26:28] Testing kubectl(v1:namespaces) [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created (dry run) namespace/my-namespace created (server dry run) [32mSuccessful (B[mmessage:Error from server (NotFound): namespaces "my-namespace" not found has: not found namespace/my-namespace created [32mcore.sh:1504: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace (B[mquery for namespaces had limit param query for resourcequotas had limit param query for limitranges had limit param ... skipping 132 lines ... 
Recording: run_namespace_tests
Running command: run_namespace_tests
+++ Running case: test-cmd.run_namespace_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_namespace_tests
+++ [0317 19:26:28] Testing kubectl(v1:namespaces)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created (dry run)
namespace/my-namespace created (server dry run)
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1504: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
query for namespaces had limit param
query for resourcequotas had limit param
query for limitranges had limit param
... skipping 132 lines ...
I0317 19:26:28.945328   42098 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679081154-721/resourcequotas?limit=500 200 OK in 1 milliseconds
I0317 19:26:28.946721   42098 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679081154-721/limitranges?limit=500 200 OK in 1 milliseconds
I0317 19:26:28.948602   42098 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679081174-30114 200 OK in 1 milliseconds
I0317 19:26:28.950200   42098 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679081174-30114/resourcequotas?limit=500 200 OK in 1 milliseconds
I0317 19:26:28.951596   42098 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/namespaces/namespace-1679081174-30114/limitranges?limit=500 200 OK in 1 milliseconds
namespace "my-namespace" deleted
W0317 19:26:33.609455   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:33.609494   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0317 19:26:33.758563   23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:33.758602   23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:26:34.024106   23215 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679081174-30114/busybox0"
I0317 19:26:34.039395   23215 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679081174-30114/busybox1"
namespace/my-namespace condition met
Successful
message:Error from server (NotFound): namespaces "my-namespace" not found
has: not found
namespace/my-namespace created
core.sh:1515: Successful get namespaces/my-namespace {{.metadata.name}}: my-namespace
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
... skipping 34 lines ...
namespace "namespace-1679081148-13608" deleted
namespace "namespace-1679081148-31421" deleted
namespace "namespace-1679081149-15586" deleted
namespace "namespace-1679081151-7297" deleted
namespace "namespace-1679081154-721" deleted
namespace "namespace-1679081174-30114" deleted
Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted
Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted
has:Warning: deleting cluster-scoped resources
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
namespace "kube-node-lease" deleted
namespace "my-namespace" deleted
namespace "namespace-1679080945-899" deleted
... skipping 32 lines ...
namespace "namespace-1679081148-13608" deleted namespace "namespace-1679081148-31421" deleted namespace "namespace-1679081149-15586" deleted namespace "namespace-1679081151-7297" deleted namespace "namespace-1679081154-721" deleted namespace "namespace-1679081174-30114" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-public" is forbidden: this namespace may not be deleted Error from server (Forbidden): namespaces "kube-system" is forbidden: this namespace may not be deleted has:namespace "my-namespace" deleted namespace/quotas created [32mcore.sh:1522: Successful get namespaces/quotas {{.metadata.name}}: quotas (B[m[32mcore.sh:1523: Successful get quota --namespace=quotas {{range.items}}{{ if eq .metadata.name "test-quota" }}found{{end}}{{end}}:: : (B[mresourcequota/test-quota created (dry run) resourcequota/test-quota created (server dry run) ... skipping 19 lines ... (B[m[32mcore.sh:1552: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mpod/valid-pod created [32mcore.sh:1556: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mI0317 19:26:44.621165 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="my-namespace" [32mcore.sh:1558: Successful get pods -n other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[m[32mSuccessful (B[mmessage:error: a resource cannot be retrieved by name across all namespaces has:a resource cannot be retrieved by name across all namespaces [32mcore.sh:1565: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: valid-pod: (B[mWarning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "valid-pod" force deleted [32mcore.sh:1569: Successful get pods --namespace=other {{range.items}}{{.metadata.name}}:{{end}}: (B[mnamespace "other" deleted ... skipping 25 lines ... 
I0317 19:26:47.702241 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081077-7171"
I0317 19:26:47.746987 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081116-7071"
I0317 19:26:47.838251 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081116-7670"
I0317 19:26:47.867738 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081133-11121"
I0317 19:26:47.921076 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081079-15257"
I0317 19:26:47.972610 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081125-2999"
W0317 19:26:48.226798 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:48.226848 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:26:48.512545 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081141-18095"
I0317 19:26:48.621377 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081141-20923"
I0317 19:26:48.622512 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081148-13608"
I0317 19:26:48.686578 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081148-31421"
I0317 19:26:48.686630 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081149-15586"
I0317 19:26:48.817827 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081151-7297"
I0317 19:26:48.836044 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081154-721"
I0317 19:26:48.858269 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081145-466"
I0317 19:26:48.870514 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="quotas"
I0317 19:26:49.029752 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="namespace-1679081174-30114"
W0317 19:26:49.062544 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:49.062588 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ exit code: 0
Recording: run_secrets_test
Running command: run_secrets_test
+++ Running case: test-cmd.run_secrets_test
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
... skipping 38 lines ...
metadata:
  creationTimestamp: null
  name: test
has not:example.com
core.sh:831: Successful get namespaces {{range.items}}{{ if eq .metadata.name "test-secrets" }}found{{end}}{{end}}:: :
namespace/test-secrets created
W0317 19:26:50.720973 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:26:50.721018 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
core.sh:835: Successful get namespaces/test-secrets {{.metadata.name}}: test-secrets
core.sh:839: Successful get secrets --namespace=test-secrets {{range.items}}{{.metadata.name}}:{{end}}:
secret/test-secret created
core.sh:843: Successful get secret/test-secret --namespace=test-secrets {{.metadata.name}}: test-secret
core.sh:844: Successful get secret/test-secret --namespace=test-secrets {{.type}}: test-type
query for secrets had limit param
... skipping 84 lines ...
+++ command: run_client_config_tests
+++ [0317 19:27:06] Creating namespace namespace-1679081226-10301
namespace/namespace-1679081226-10301 created
Context "test" modified.
+++ [0317 19:27:06] Testing client config
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:error: stat missing: no such file or directory
has:missing: no such file or directory
Successful
message:Error in configuration: context was not found for specified context: missing-context
has:context was not found for specified context: missing-context
Successful
message:error: no server found for cluster "missing-cluster"
has:no server found for cluster "missing-cluster"
Successful
message:error: auth info "missing-user" does not exist
has:auth info "missing-user" does not exist
Successful
message:error: error loading config file "/tmp/newconfig.yaml": no kind "Config" is registered for version "v-1" in scheme "vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go:50"
has:error loading config file
Successful
message:error: stat missing-config: no such file or directory
has:no such file or directory
+++ exit code: 0
Recording: run_service_accounts_tests
Running command: run_service_accounts_tests
+++ Running case: test-cmd.run_service_accounts_tests
... skipping 57 lines ...
Labels: <none>
Annotations: <none>
Schedule: 59 23 31 2 *
Concurrency Policy: Allow
Suspend: False
Successful Job History Limit: 3
Failed Job History Limit: 1
Starting Deadline Seconds: <unset>
Selector: <unset>
Parallelism: <unset>
Completions: <unset>
Pod Template:
  Labels: <none>
... skipping 57 lines ...
Annotations: batch.kubernetes.io/job-tracking:
  cronjob.kubernetes.io/instantiate: manual
Parallelism: 1
Completions: 1
Completion Mode: NonIndexed
Start Time: Fri, 17 Mar 2023 19:27:15 +0000
Pods Statuses: 1 Active (0 Ready) / 0 Succeeded / 0 Failed
Pod Template:
  Labels: batch.kubernetes.io/controller-uid=2997ec60-4def-4222-ada3-f53f4adba23f
    batch.kubernetes.io/job-name=test-job
    controller-uid=2997ec60-4def-4222-ada3-f53f4adba23f
    job-name=test-job
  Containers:
... skipping 105 lines ...
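Each Successful/message/has triple in the client-config section above is a negative check: kubectl is pointed at a nonexistent kubeconfig, context, cluster, or user, and the harness greps the resulting error text. A sketch of the kinds of invocations that produce these messages (the flag values are the deliberately missing names from the log):

  kubectl get pod --kubeconfig=missing         # error: stat missing: no such file or directory
  kubectl get pod --context=missing-context    # context was not found for specified context
  kubectl get pod --cluster=missing-cluster    # no server found for cluster "missing-cluster"
  kubectl get pod --user=missing-user          # auth info "missing-user" does not exist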
+++ Running case: test-cmd.run_service_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_service_tests
Context "test" modified.
+++ [0317 19:27:23] Testing kubectl(v1:services)
core.sh:989: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
W0317 19:27:24.154875 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:27:24.154919 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:27:24.165332 20099 alloc.go:330] "allocated clusterIPs" service="default/redis-master" clusterIPs=map[IPv4:10.0.0.49]
service/redis-master created
core.sh:993: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
matched Name:
matched Labels:
matched Selector:
... skipping 349 lines ...
  type: ClusterIP
status:
  loadBalancer: {}
Successful
message:kubectl-create
kubectl-set
has:kubectl-set
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
core.sh:1034: Successful get services redis-master {{range.spec.selector}}{{.}}:{{end}}: redis:master:backend:
service/redis-master selector updated
Successful
message:Error from server (Conflict): Operation cannot be fulfilled on services "redis-master": the object has been modified; please apply your changes to the latest version and try again
has:Conflict
core.sh:1047: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:redis-master:
I0317 19:27:26.274600 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="test-jobs"
service "redis-master" deleted
core.sh:1054: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
core.sh:1058: Successful get services {{range.items}}{{.metadata.name}}:{{end}}: kubernetes:
... skipping 305 lines ...
message:daemonset.apps/bind
REVISION  CHANGE-CAUSE
2         kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
3         kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true
has:3 kubectl apply
Successful
message:error: unable to find specified revision 1000000 in history
has:unable to find specified revision
apps.sh:122: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:2.0:
apps.sh:123: Successful get daemonset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1
daemonset.apps/bind rolled back
apps.sh:126: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/pause:latest:
apps.sh:127: Successful get daemonset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
... skipping 37 lines ...
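The daemonset lines above exercise rollout history and rollback, including the failure path for a revision that does not exist; a minimal sketch (the daemonset name comes from the log, the good revision number is illustrative):

  kubectl rollout history daemonset/bind                     # prints the REVISION / CHANGE-CAUSE table
  kubectl rollout undo daemonset/bind --to-revision=1000000  # error: unable to find specified revision
  kubectl rollout undo daemonset/bind --to-revision=2        # rolls the pod template back to that revision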
core.sh:1205: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
replicationcontroller/frontend created
I0317 19:27:36.555099 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-z5nkt"
I0317 19:27:36.605670 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-jx4cd"
I0317 19:27:36.605730 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-8c8qq"
replicationcontroller "frontend" deleted
E0317 19:27:36.630761 23215 replica_set.go:544] sync "namespace-1679081256-25689/frontend" failed with replicationcontrollers "frontend" not found
core.sh:1210: Successful get pods -l name=frontend {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:1214: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
W0317 19:27:36.813037 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:27:36.813080 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
replicationcontroller/frontend created
I0317 19:27:37.043419 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-skzhr"
I0317 19:27:37.091525 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-6qbcl"
I0317 19:27:37.091567 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-gtk8s"
core.sh:1218: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: frontend:
matched Name:
... skipping 9 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 18 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 12 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 27 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 17 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 11 lines ...
Namespace: namespace-1679081256-25689
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v4
... skipping 25 lines ...
core.sh:1240: Successful get rc frontend {{.spec.replicas}}: 3
replicationcontroller/frontend scaled
E0317 19:27:38.120822 23215 replica_set.go:220] ReplicaSet has no controller: &ReplicaSet{ObjectMeta:{frontend namespace-1679081256-25689 5b63d45a-1019-4c53-b3c8-b1689d994928 2272 2 2023-03-17 19:27:37 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] [{kubectl Update v1 <nil> FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {kube-controller-manager Update v1 2023-03-17 19:27:37 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kubectl-create Update v1 2023-03-17 19:27:37 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{"f:selector":{},"f:template":{".":{},"f:metadata":{".":{},"f:creationTimestamp":{},"f:labels":{".":{},"f:app":{},"f:tier":{}}},"f:spec":{".":{},"f:containers":{".":{},"k:{\"name\":\"php-redis\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"GET_HOSTS_FROM\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{".":{},"f:requests":{".":{},"f:cpu":{},"f:memory":{}}},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{app: guestbook,tier: frontend,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[app:guestbook tier:frontend] map[] [] [] []} {[] [] [{php-redis gcr.io/google_samples/gb-frontend:v4 [] [] [{ 0 80 TCP }] [] [{GET_HOSTS_FROM dns nil}] {map[] map[cpu:{{100 -3} {<nil>} 100m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}] []} [] [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00277c548 <nil> ClusterFirst map[] <nil> false false false <nil> PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] <nil> nil [] <nil> <nil> <nil> map[] [] <nil> nil <nil> [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}
I0317 19:27:38.160976 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: frontend-skzhr"
core.sh:1244: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1248: Successful get rc frontend {{.spec.replicas}}: 2
error: Expected replicas to be 3, was 2
core.sh:1252: Successful get rc frontend {{.spec.replicas}}: 2
core.sh:1256: Successful get rc frontend {{.spec.replicas}}: 2
replicationcontroller/frontend scaled
I0317 19:27:38.611253 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-lxqv4"
core.sh:1260: Successful get rc frontend {{.spec.replicas}}: 3
core.sh:1264: Successful get rc frontend {{.spec.replicas}}: 3
... skipping 54 lines ...
deployment.apps/nginx-deployment scaled
I0317 19:27:41.238989 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-7df65dc9f4 to 1 from 3"
core.sh:1312: Successful get deployment nginx-deployment {{.spec.replicas}}: 1
I0317 19:27:41.318099 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-7df65dc9f4-l8kt2"
I0317 19:27:41.331606 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-7df65dc9f4-75t4s"
deployment.apps "nginx-deployment" deleted
E0317 19:27:41.425129 23215 replica_set.go:544] sync "namespace-1679081256-25689/nginx-deployment-7df65dc9f4" failed with Operation cannot be fulfilled on replicasets.apps "nginx-deployment-7df65dc9f4": StorageError: invalid object, Code: 4, Key: /registry/replicasets/namespace-1679081256-25689/nginx-deployment-7df65dc9f4, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4fbdc293-42dc-422c-9901-b32f2ab68524, UID in object meta:
deployment.apps/nginx-deployment created
I0317 19:27:41.672276 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7df65dc9f4 to 3"
I0317 19:27:41.697517 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-6fqpw"
I0317 19:27:41.745518 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-ckg2v"
I0317 19:27:41.745552 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-2pc8q"
deployment.apps/nginx-deployment scaled
... skipping 4 lines ...
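The "error: Expected replicas to be 3, was 2" line above is kubectl scale's --current-replicas precondition failing on purpose; a sketch of the passing and failing shapes (resource names from the log):

  kubectl scale rc frontend --replicas=2                        # unconditional scale
  kubectl scale rc frontend --current-replicas=3 --replicas=2   # rejected unless the RC currently has exactly 3 replicas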
I0317 19:27:42.093215 20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081256-25689/expose-test-deployment" clusterIPs=map[IPv4:10.0.0.156]
Successful
message:service/expose-test-deployment exposed
has:service/expose-test-deployment exposed
service "expose-test-deployment" deleted
Successful
message:error: couldn't retrieve selectors via --selector flag or introspection: invalid deployment: no selectors, therefore cannot be exposed
has:invalid deployment: no selectors
deployment.apps/nginx-deployment created
I0317 19:27:42.515674 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-7df65dc9f4 to 3"
I0317 19:27:42.532299 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-xbcxb"
I0317 19:27:42.554158 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-w4t7b"
I0317 19:27:42.554211 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-7df65dc9f4" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-7df65dc9f4-8zjr6"
... skipping 24 lines ...
pod "valid-pod" deleted
service "frontend" deleted
service "frontend-2" deleted
service "frontend-3" deleted
service "frontend-4" deleted
Successful
message:error: cannot expose a Node
has:cannot expose
Successful
message:The Service "invalid-large-service-name-that-has-more-than-sixty-three-characters" is invalid: metadata.name: Invalid value: "invalid-large-service-name-that-has-more-than-sixty-three-characters": must be no more than 63 characters
has:metadata.name: Invalid value
I0317 19:27:45.087592 20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081256-25689/kubernetes-serve-hostname-testing-sixty-three-characters-in-len" clusterIPs=map[IPv4:10.0.0.143]
Successful
... skipping 12 lines ...
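The expose failures above cover kubectl expose's main validation paths: a deployment without a selector cannot be exposed, a Node is not an exposable resource, and service names are capped at 63 characters (the DNS label limit). A sketch of the happy and failing shapes (port values are illustrative):

  kubectl expose deployment nginx-deployment --port=80 --target-port=8000
  kubectl expose node 127.0.0.1 --port=80   # error: cannot expose a Node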
core.sh:1409: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
core.sh:1413: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}:
replicationcontroller/frontend created
I0317 19:27:46.225037 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-zwsmp"
I0317 19:27:46.240669 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-vw6qc"
I0317 19:27:46.251147 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/frontend" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-bfr2x"
W0317 19:27:46.297734 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:27:46.297776 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
replicationcontroller/redis-slave created
I0317 19:27:46.450855 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/redis-slave" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-slave-fznzc"
I0317 19:27:46.498838 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/redis-slave" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: redis-slave-kbml2"
core.sh:1418: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: frontend:redis-slave:
core.sh:1422: Successful get rc {{range.items}}{{.metadata.name}}:{{end}}: frontend:redis-slave:
replicationcontroller "frontend" deleted
... skipping 8 lines ...
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1436: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 1 2 70
horizontalpodautoscaler.autoscaling "frontend" deleted
horizontalpodautoscaler.autoscaling/frontend autoscaled
core.sh:1440: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80
horizontalpodautoscaler.autoscaling "frontend" deleted
error: required flag(s) "max" not set
replicationcontroller "frontend" deleted
core.sh:1449: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
... skipping 24 lines ...
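The 'required flag(s) "max" not set' error above comes from kubectl autoscale, which always needs an upper bound; the two successful HPA creations just before it correspond to shapes like the following (the numbers mirror the 1 2 70 and 2 3 80 checks above):

  kubectl autoscale rc frontend --max=2 --cpu-percent=70          # min defaults when omitted
  kubectl autoscale rc frontend --min=2 --max=3 --cpu-percent=80
  kubectl autoscale rc frontend --min=1 --cpu-percent=70          # fails: --max is required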
    limits:
      cpu: 300m
    requests:
      cpu: 300m
  terminationGracePeriodSeconds: 0
status: {}
Error from server (NotFound): deployments.apps "nginx-deployment-resources" not found
deployment.apps/nginx-deployment-resources created
I0317 19:27:48.358648 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-5f79767bf9 to 3"
I0317 19:27:48.373592 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-smb6k"
I0317 19:27:48.419731 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-pljxt"
I0317 19:27:48.419766 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-5f79767bf9-bdhw5"
core.sh:1455: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment-resources:
core.sh:1456: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
core.sh:1457: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment-resources resource requirements updated
I0317 19:27:48.683858 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-77d775b4f9 to 1"
I0317 19:27:48.707558 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-77d775b4f9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-77d775b4f9-jtqwp"
core.sh:1460: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 100m:
core.sh:1461: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 100m:
error: unable to find container named redis
deployment.apps/nginx-deployment-resources resource requirements updated
I0317 19:27:49.064055 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-resources-5f79767bf9 to 2 from 3"
core.sh:1466: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
I0317 19:27:49.134752 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-5f79767bf9" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-resources-5f79767bf9-smb6k"
I0317 19:27:49.135018 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-resources-688f8b78b5 to 1 from 0"
I0317 19:27:49.149328 23215 event.go:307] "Event occurred" object="namespace-1679081256-25689/nginx-deployment-resources-688f8b78b5" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-resources-688f8b78b5-pm4zn"
... skipping 155 lines ...
  status: "True"
  type: Progressing
observedGeneration: 4
replicas: 4
unavailableReplicas: 4
updatedReplicas: 1
error: you must specify resources by --filename when --local is set.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
core.sh:1477: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}: 200m:
core.sh:1478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}: 300m:
core.sh:1479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}: 300m:
... skipping 46 lines ...
  pod-template-hash=859689d794
Annotations: deployment.kubernetes.io/desired-replicas: 1
  deployment.kubernetes.io/max-replicas: 2
  deployment.kubernetes.io/revision: 1
Controlled By: Deployment/test-nginx-apps
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=test-nginx-apps
    pod-template-hash=859689d794
  Containers:
   nginx:
    Image: registry.k8s.io/nginx:test-cmd
... skipping 123 lines ...
apps.sh:340: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
    Image: registry.k8s.io/nginx:test-cmd
deployment.apps/nginx rolled back (server dry run)
apps.sh:344: Successful get deployment.apps {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx rolled back
apps.sh:348: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
error: unable to find specified revision 1000000 in history
apps.sh:351: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
deployment.apps/nginx rolled back
apps.sh:355: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
deployment.apps/nginx paused
error: you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again
error: deployments.apps "nginx" can't restart paused deployment (run rollout resume first)
deployment.apps/nginx resumed
deployment.apps/nginx rolled back
  deployment.kubernetes.io/revision-history: 1,3
error: desired revision (3) is different from the running revision (5)
deployment.apps/nginx restarted
I0317 19:27:59.827764 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-77566b75db to 2 from 3"
I0317 19:27:59.902395 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-d6947fd6d to 1 from 0"
I0317 19:27:59.913844 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-77566b75db" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-77566b75db-j6mmf"
I0317 19:27:59.925996 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-d6947fd6d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-d6947fd6d-t9gpb"
Successful
... skipping 61 lines ...
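The paused-deployment errors above pin down the ordering rules: rollback and restart both refuse to act while a deployment is paused. A sketch of the sequence being exercised (deployment name from the log, revision number illustrative):

  kubectl rollout pause deployment/nginx
  kubectl rollout undo deployment/nginx       # error: cannot rollback a paused deployment
  kubectl rollout restart deployment/nginx    # error: can't restart paused deployment
  kubectl rollout resume deployment/nginx
  kubectl rollout undo deployment/nginx --to-revision=3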
deployment.apps/nginx2 created
I0317 19:28:01.256362 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx2-5744c8b44d to 3"
I0317 19:28:01.281175 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-6v2fr"
I0317 19:28:01.329079 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-n4h94"
I0317 19:28:01.329191 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx2-5744c8b44d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx2-5744c8b44d-s7cjt"
deployment.apps "nginx2" deleted
E0317 19:28:01.375829 23215 replica_set.go:544] sync "namespace-1679081270-21124/nginx2-5744c8b44d" failed with replicasets.apps "nginx2-5744c8b44d" not found
deployment.apps "nginx" deleted
apps.sh:389: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
deployment.apps/nginx-deployment created
I0317 19:28:01.838577 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57bf7fbc68 to 3"
I0317 19:28:01.861754 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-w24fj"
I0317 19:28:01.886316 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-5wwtk"
... skipping 8 lines ...
apps.sh:399: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
I0317 19:28:02.516889 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6444b54576 to 1"
I0317 19:28:02.547700 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-6444b54576" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6444b54576-c6wfl"
apps.sh:402: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:403: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
error: unable to find container named "redis"
deployment.apps/nginx-deployment image updated
apps.sh:408: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
apps.sh:409: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
apps.sh:412: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
apps.sh:413: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
... skipping 57 lines ...
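The image updates above are kubectl set image, again with the unknown-container failure path; a sketch (image tags mirror the checks above):

  kubectl set image deployment/nginx-deployment nginx=registry.k8s.io/nginx:1.7.9
  kubectl set image deployment/nginx-deployment redis=redis    # error: unable to find container named "redis"
  kubectl set image deployment/nginx-deployment '*=registry.k8s.io/nginx:1.7.9'   # * targets every container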
I0317 19:28:06.353881 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-57bf7fbc68-d6lbb"
I0317 19:28:06.364959 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-694d45dfd5" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-694d45dfd5-qxhtf"
I0317 19:28:06.445526 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set nginx-deployment-6bf769bd to 0 from 1"
deployment.apps/nginx-deployment env updated
I0317 19:28:06.476652 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-ffc86458c to 1"
Successful
message:error: standard input cannot be used for multiple arguments
has:standard input cannot be used for multiple arguments
deployment.apps "nginx-deployment" deleted
I0317 19:28:06.723832 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-6bf769bd" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: nginx-deployment-6bf769bd-js9nh"
configmap "test-set-env-config" deleted
I0317 19:28:06.745550 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-ffc86458c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-ffc86458c-2t96t"
E0317 19:28:06.831865 23215 replica_set.go:544] sync "namespace-1679081270-21124/nginx-deployment-57bf7fbc68" failed with replicasets.apps "nginx-deployment-57bf7fbc68" not found
secret "test-set-env-secret" deleted
apps.sh:474: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}:
E0317 19:28:07.023928 23215 replica_set.go:544] sync "namespace-1679081270-21124/nginx-deployment-694d45dfd5" failed with replicasets.apps "nginx-deployment-694d45dfd5" not found
E0317 19:28:07.123013 23215 replica_set.go:544] sync "namespace-1679081270-21124/nginx-deployment-6bf769bd" failed with replicasets.apps "nginx-deployment-6bf769bd" not found
deployment.apps/nginx-deployment created
E0317 19:28:07.190699 23215 replica_set.go:544] sync "namespace-1679081270-21124/nginx-deployment-ffc86458c" failed with replicasets.apps "nginx-deployment-ffc86458c" not found
I0317 19:28:07.190973 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-57bf7fbc68 to 3"
apps.sh:477: Successful get deployment {{range.items}}{{.metadata.name}}:{{end}}: nginx-deployment:
I0317 19:28:07.237305 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-jcgsq"
I0317 19:28:07.286425 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-scsq8"
apps.sh:478: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:test-cmd:
I0317 19:28:07.385446 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-57bf7fbc68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-57bf7fbc68-l6t7b"
W0317 19:28:07.388612 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:28:07.388655 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
apps.sh:479: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
deployment.apps/nginx-deployment image updated
I0317 19:28:07.506234 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set nginx-deployment-6444b54576 to 1"
apps.sh:482: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx:1.7.9:
I0317 19:28:07.637559 23215 event.go:307] "Event occurred" object="namespace-1679081270-21124/nginx-deployment-6444b54576" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: nginx-deployment-6444b54576-zxzl7"
apps.sh:483: Successful get deployment {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/perl:
... skipping 202 lines ...
apps.sh:645: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
replicaset.apps/frontend created
+++ [0317 19:28:08] Deleting rs
I0317 19:28:08.640995 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-mwjgg"
I0317 19:28:08.671245 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-jqvxn"
I0317 19:28:08.671282 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-j6zf5"
E0317 19:28:08.775693 23215 replica_set.go:544] sync "namespace-1679081288-16100/frontend" failed with replicasets.apps "frontend" not found
replicaset.apps "frontend" deleted
Waiting for Get pods -l tier=frontend {{range.items}}{{.metadata.name}}:{{end}} : expected: , got: frontend-j6zf5:frontend-jqvxn:frontend-mwjgg:
apps.sh:651: Successful get pods -l tier=frontend {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:655: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
replicaset.apps/frontend created
I0317 19:28:09.284591 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-z6dcc"
I0317 19:28:09.362815 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-xt8sv"
I0317 19:28:09.362864 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-5gt9q"
apps.sh:659: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
+++ [0317 19:28:09] Deleting rs
replicaset.apps "frontend" deleted
E0317 19:28:09.656821 23215 replica_set.go:544] sync "namespace-1679081288-16100/frontend" failed with replicasets.apps "frontend" not found
apps.sh:663: Successful get rs {{range.items}}{{.metadata.name}}:{{end}}:
apps.sh:665: Successful get pods -l tier=frontend {{range.items}}{{(index .spec.containers 0).name}}:{{end}}: php-redis:php-redis:php-redis:
I0317 19:28:09.847895 23215 horizontal.go:512] "Horizontal Pod Autoscaler has been deleted" HPA="namespace-1679081270-21124/nginx-deployment"
pod "frontend-5gt9q" deleted
pod "frontend-xt8sv" deleted
pod "frontend-z6dcc" deleted
... skipping 16 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 18 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 12 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 25 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 17 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 11 lines ...
Namespace: namespace-1679081288-16100
Selector: app=guestbook,tier=frontend
Labels: app=guestbook
  tier=frontend
Annotations: <none>
Replicas: 3 current / 3 desired
Pods Status: 0 Running / 3 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=guestbook
    tier=frontend
  Containers:
   php-redis:
    Image: gcr.io/google_samples/gb-frontend:v3
... skipping 169 lines ...
(B[mreplicaset.apps "frontend" deleted deployment.apps "scale-1" deleted deployment.apps "scale-2" deleted deployment.apps "scale-3" deleted replicaset.apps/frontend created I0317 19:28:14.611685 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-7slxr" W0317 19:28:14.621877 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0317 19:28:14.621913 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource I0317 19:28:14.638827 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-xmpz5" I0317 19:28:14.638857 23215 event.go:307] "Event occurred" object="namespace-1679081288-16100/frontend" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: frontend-ldn9r" [32mapps.sh:740: Successful get rs frontend {{.spec.replicas}}: 3 (B[mI0317 19:28:14.812873 20099 alloc.go:330] "allocated clusterIPs" service="namespace-1679081288-16100/frontend" clusterIPs=map[IPv4:10.0.0.96] service/frontend exposed [32mapps.sh:744: Successful get service frontend {{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}: <no value> 80 ... skipping 44 lines ... horizontalpodautoscaler.autoscaling/frontend autoscaled [32mapps.sh:808: Successful get hpa frontend {{.spec.minReplicas}} {{.spec.maxReplicas}} {{(index .spec.metrics 0).resource.target.averageUtilization}}: 2 3 80 (B[m[32mSuccessful (B[mmessage:kubectl-autoscale has:kubectl-autoscale horizontalpodautoscaler.autoscaling "frontend" deleted error: required flag(s) "max" not set replicaset.apps "frontend" deleted +++ exit code: 0 Recording: run_stateful_set_tests Running command: run_stateful_set_tests +++ Running case: test-cmd.run_stateful_set_tests ... skipping 265 lines ... (B[mmessage:statefulset.apps/nginx REVISION CHANGE-CAUSE 2 kubectl apply --filename=hack/testdata/rollingupdate-statefulset-rv2.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true 3 kubectl apply --filename=hack/testdata/rollingupdate-statefulset.yaml --record=true --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true --match-server-version=true has:3 kubectl apply [32mSuccessful (B[mmessage:error: unable to find specified revision 1000000 in history has:unable to find specified revision [32mapps.sh:570: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.7: (B[m[32mapps.sh:571: Successful get statefulset {{range.items}}{{(len .spec.template.spec.containers)}}{{end}}: 1 (B[mstatefulset.apps/nginx rolled back [32mapps.sh:574: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 0).image}}:{{end}}: registry.k8s.io/nginx-slim:0.8: (B[m[32mapps.sh:575: Successful get statefulset {{range.items}}{{(index .spec.template.spec.containers 1).image}}:{{end}}: registry.k8s.io/pause:2.0: ... skipping 87 lines ... 
Name: mock
Namespace: namespace-1679081303-8747
Selector: app=mock
Labels: app=mock
Annotations: <none>
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=mock
  Containers:
   mock-container:
    Image: registry.k8s.io/pause:3.9
    Port: 9949/TCP
... skipping 53 lines ...
IPs: 10.0.0.163
Port: <unset> 99/TCP
TargetPort: 9949/TCP
Endpoints: <none>
Session Affinity: None
Events: <none>
W0317 19:28:27.358278 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:28:27.358328 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Name: mock
Namespace: namespace-1679081303-8747
Selector: app=mock
Labels: app=mock
Annotations: <none>
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=mock
  Containers:
   mock-container:
    Image: registry.k8s.io/pause:3.9
    Port: 9949/TCP
... skipping 61 lines ...
Name: mock
Namespace: namespace-1679081303-8747
Selector: app=mock
Labels: app=mock
Annotations: <none>
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=mock
  Containers:
   mock-container:
    Image: registry.k8s.io/pause:3.9
    Port: 9949/TCP
... skipping 42 lines ...
Namespace: namespace-1679081303-8747
Selector: app=mock
Labels: app=mock
  status=replaced
Annotations: <none>
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=mock
  Containers:
   mock-container:
    Image: registry.k8s.io/pause:3.9
    Port: 9949/TCP
... skipping 11 lines ...
Namespace: namespace-1679081303-8747
Selector: app=mock2
Labels: app=mock2
  status=replaced
Annotations: <none>
Replicas: 1 current / 1 desired
Pods Status: 0 Running / 1 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels: app=mock2
  Containers:
   mock-container:
    Image: registry.k8s.io/pause:3.9
    Port: 9949/TCP
... skipping 115 lines ...
+++ [0317 19:28:37] Creating namespace namespace-1679081317-9124
namespace/namespace-1679081317-9124 created
Context "test" modified.
+++ [0317 19:28:37] Testing persistent volumes
storage.sh:30: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
persistentvolume/pv0001 created
E0317 19:28:37.791683 23215 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
E0317 19:28:37.830278 23215 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
storage.sh:33: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
persistentvolume "pv0001" deleted
persistentvolume/pv0002 created
E0317 19:28:38.335293 23215 pv_protection_controller.go:113] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again
E0317 19:28:38.350916 23215 pv_protection_controller.go:113] PV pv0002 failed with : Operation cannot be fulfilled on persistentvolumes "pv0002": the object has been modified; please apply your changes to the latest version and try again
storage.sh:36: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0002:
persistentvolume "pv0002" deleted
persistentvolume/pv0003 created
storage.sh:39: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0003:
query for persistentvolumes had limit param
query for events had limit param
... skipping 4 lines ...
I0317 19:28:38.951146 54730 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes?limit=500 200 OK in 2 milliseconds
I0317 19:28:38.953652 54730 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/persistentvolumes/pv0003 200 OK in 1 milliseconds
I0317 19:28:38.964535 54730 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.name%3Dpv0003%2CinvolvedObject.namespace%3D%2CinvolvedObject.kind%3DPersistentVolume%2CinvolvedObject.uid%3De375df24-5906-41f4-8d5c-8afc19302c9c&limit=500 200 OK in 10 milliseconds
persistentvolume "pv0003" deleted
storage.sh:44: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}:
persistentvolume/pv0001 created
E0317 19:28:39.570787 23215 pv_protection_controller.go:113] PV pv0001 failed with : Operation cannot be fulfilled on persistentvolumes "pv0001": the object has been modified; please apply your changes to the latest version and try again
storage.sh:47: Successful get pv {{range.items}}{{.metadata.name}}:{{end}}: pv0001:
Successful
message:Warning: deleting cluster-scoped resources, not scoped to the provided namespace
persistentvolume "pv0001" deleted
has:Warning: deleting cluster-scoped resources
Successful
... skipping 88 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 34 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 35 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 31 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 42 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 34 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 34 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 30 lines ...
Labels: <none>
Annotations: node.alpha.kubernetes.io/ttl: 0
             save-managers: true
CreationTimestamp: Fri, 17 Mar 2023 19:22:23 +0000
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease: Failed to get lease: leases.coordination.k8s.io "127.0.0.1" not found
Conditions:
  Type            Status   LastHeartbeatTime                 LastTransitionTime                Reason                  Message
  ----            ------   -----------------                 ------------------                ------                  -------
  Ready           Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  MemoryPressure  Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
  DiskPressure    Unknown  Fri, 17 Mar 2023 19:22:23 +0000   Fri, 17 Mar 2023 19:23:27 +0000   NodeStatusNeverUpdated  Kubelet never posted node status.
... skipping 100 lines ...
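Annotation: each block above is one `kubectl describe node 127.0.0.1` invocation. The Unknown conditions and the "Failed to get lease" line are expected in test-cmd: no kubelet runs against this API server, so node status is never posted and no Lease object exists. A sketch of how to reproduce the same view against such a cluster:

  kubectl describe node 127.0.0.1
  kubectl get lease 127.0.0.1 -n kube-node-lease   # NotFound here, matching the describe output above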
+++ [0317 19:28:47] Testing kubectl with configured client.authentication.k8s.io/v1 interactive exec credentials plugin
+++ [0317 19:28:47] Running command 'script -q /dev/null -c /tmp/test-cmd-exec-credentials-script-file.sh' (kubectl command: 'replace -f - --force') with input '{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"some-resource"}}'
+++ [0317 19:28:48] exec credential plugin not run because kubectl already uses standard input
+++ [0317 19:28:48] Running command 'script -q /dev/null -c /tmp/test-cmd-exec-credentials-script-file.sh' (kubectl command: 'apply -f -') with input '{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"some-resource"}}'
+++ [0317 19:28:48] exec credential plugin not run because kubectl already uses standard input
+++ [0317 19:28:48] Running command 'script -q /dev/null -c /tmp/test-cmd-exec-credentials-script-file.sh' (kubectl command: 'set env deployment/some-deployment -') with input 'SOME_ENV_VAR_KEY=SOME_ENV_VAR_VAL'
W0317 19:28:48.619214 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:28:48.619254 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
+++ [0317 19:28:48] exec credential plugin not run because kubectl already uses standard input
+++ [0317 19:28:48] kubeconfig was not loaded successfully because client.authentication.k8s.io/v1 exec credential plugin is missing interactiveMode
+++ exit code: 0
Recording: run_authorization_tests
Running command: run_authorization_tests
... skipping 60 lines ...
yes
has:the server doesn't have a resource type
Successful
message:yes
has:yes
Successful
message:error: --subresource can not be used with NonResourceURL
has:subresource can not be used with NonResourceURL
Successful
Successful
message:yes
0
has:0
... skipping 62 lines ...
{Verbs:[get list watch] APIGroups:[] Resources:[configmaps] ResourceNames:[] NonResourceURLs:[]}
legacy-script.sh:887: Successful get rolebindings -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-RB:
legacy-script.sh:888: Successful get roles -n some-other-random -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-R:
legacy-script.sh:889: Successful get clusterrolebindings -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CRB:
legacy-script.sh:890: Successful get clusterroles -l test-cmd=auth {{range.items}}{{.metadata.name}}:{{end}}: testing-CR:
Successful
message:error: only rbac.authorization.k8s.io/v1 is supported: not *v1beta1.ClusterRole
has:only rbac.authorization.k8s.io/v1 is supported
rolebinding.rbac.authorization.k8s.io "testing-RB" deleted
role.rbac.authorization.k8s.io "testing-R" deleted
Warning: deleting cluster-scoped resources, not scoped to the provided namespace
clusterrole.rbac.authorization.k8s.io "testing-CR" deleted
clusterrolebinding.rbac.authorization.k8s.io "testing-CRB" deleted
... skipping 24 lines ...
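Annotation: the "missing interactiveMode" line above is the intended negative case: for exec credential plugins at client.authentication.k8s.io/v1, interactiveMode is a required field, and kubectl refuses to load a kubeconfig that omits it. A sketch of a user entry that does load (the plugin path is taken from the log; the credentials name "exec-user" is made up):

  kubectl config set-credentials exec-user \
    --exec-api-version=client.authentication.k8s.io/v1 \
    --exec-command=/tmp/test-cmd-exec-credentials-script-file.sh \
    --exec-interactive-mode=IfAvailable   # required for v1; Never and Always are the other values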
discovery.sh:236: Successful get all -l app=cassandra {{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}: cassandra:cassandra:cassandra:cassandra:
I0317 19:28:52.156564 23215 event.go:307] "Event occurred" object="namespace-1679081331-25259/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-sjdm7"
pod "cassandra-ktk25" deleted
pod "cassandra-sxmfp" deleted
I0317 19:28:52.248159 23215 event.go:307] "Event occurred" object="namespace-1679081331-25259/cassandra" fieldPath="" kind="ReplicationController" apiVersion="v1" type="Normal" reason="SuccessfulCreate" message="Created pod: cassandra-7clhj"
replicationcontroller "cassandra" deleted
E0317 19:28:52.273873 23215 replica_set.go:544] sync "namespace-1679081331-25259/cassandra" failed with replicationcontrollers "cassandra" not found
service "cassandra" deleted
+++ exit code: 0
Recording: run_kubectl_explain_tests
Running command: run_kubectl_explain_tests
+++ Running case: test-cmd.run_kubectl_explain_tests
... skipping 476 lines ...
namespace-1679081331-25259   default   0   14s
namespace-1679081333-9346    default   0   12s
some-other-random            default   0   15s
has:all-ns-test-2
namespace "all-ns-test-1" deleted
namespace "all-ns-test-2" deleted
W0317 19:29:12.422251 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:29:12.422294 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0317 19:29:15.306057 23215 namespace_controller.go:182] "Namespace has been deleted" namespace="all-ns-test-1"
get.sh:442: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}: valid-pod:
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "valid-pod" force deleted
get.sh:446: Successful get pods {{range.items}}{{.metadata.name}}:{{end}}:
get.sh:450: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1:
... skipping 19 lines ...
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679081333-9346 namespace.
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679081333-9346 namespace.
error: 1 warning received
has:example.com/v1beta1 DeprecatedKind is deprecated
Successful
message:Warning: example.com/v1beta1 DeprecatedKind is deprecated; use example.com/v1 DeprecatedKind
No resources found in namespace-1679081333-9346 namespace.
error: 1 warning received
has:error: 1 warning received
I0317 19:29:16.582235 20099 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager
I0317 19:29:16.582288 20099 handler.go:165] Adding GroupVersion example.com v1beta1 to ResourceManager
customresourcedefinition.apiextensions.k8s.io "deprecated.example.com" deleted
I0317 19:29:16.596633 20099 handler.go:165] Adding GroupVersion example.com v1 to ResourceManager
I0317 19:29:16.596684 20099 handler.go:165] Adding GroupVersion example.com v1beta1 to ResourceManager
+++ exit code: 0
... skipping 355 lines ...
I0317 19:29:22.771957 58985 round_trippers.go:553] GET https://127.0.0.1:6443/apis/certificates.k8s.io/v1/certificatesigningrequests?limit=500 200 OK in 1 milliseconds
I0317 19:29:22.774553 58985 round_trippers.go:553] GET https://127.0.0.1:6443/apis/certificates.k8s.io/v1/certificatesigningrequests/foo 200 OK in 1 milliseconds
I0317 19:29:22.785035 58985 round_trippers.go:553] GET https://127.0.0.1:6443/api/v1/events?fieldSelector=involvedObject.namespace%3D%2CinvolvedObject.kind%3DCertificateSigningRequest%2CinvolvedObject.uid%3D49f6b8ec-55b8-4c49-8812-f82b995df7df%2CinvolvedObject.name%3Dfoo&limit=500 200 OK in 9 milliseconds
certificatesigningrequest.certificates.k8s.io "foo" deleted
certificate.sh:36: Successful get csr {{range.items}}{{.metadata.name}}{{end}}:
certificatesigningrequest.certificates.k8s.io/foo created
W0317 19:29:23.273390 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:29:23.273439 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
certificate.sh:39: Successful get csr/foo {{range.status.conditions}}{{.type}}{{end}}:
certificatesigningrequest.certificates.k8s.io/foo approved
{
  "apiVersion": "v1",
  "items": [
    {
... skipping 149 lines ...
Running command: run_cluster_management_tests
+++ Running case: test-cmd.run_cluster_management_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_cluster_management_tests
+++ [0317 19:29:25] Creating namespace namespace-1679081365-20138
W0317 19:29:25.095789 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:29:25.095836 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
namespace/namespace-1679081365-20138 created
Context "test" modified.
+++ [0317 19:29:25] Testing cluster-management commands
node-management.sh:85: Successful get nodes {{range.items}}{{.metadata.name}}:{{end}}: 127.0.0.1:
pod/test-pod-1 created
pod/test-pod-2 created
... skipping 44 lines ...
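Annotation: the CSR flow above (certificate.sh) creates a CertificateSigningRequest named "foo", approves it, and re-reads its conditions; the paginated GETs with limit=500 come from kubectl's list calls during describe/get. Roughly:

  kubectl certificate approve foo
  kubectl get csr foo -o jsonpath='{range .status.conditions[*]}{.type}{end}'   # expect: Approved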
node/127.0.0.1 cordoned (server dry run)
Warning: deleting Pods that declare no controller: namespace-1679081365-20138/test-pod-1
evicting pod namespace-1679081365-20138/test-pod-1 (server dry run)
node/127.0.0.1 drained (server dry run)
node-management.sh:140: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Warning: deleting Pods that declare no controller: namespace-1679081365-20138/test-pod-1
W0317 19:29:51.136605 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:29:51.136647 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:node/127.0.0.1 cordoned
evicting pod namespace-1679081365-20138/test-pod-1
pod "test-pod-1" has DeletionTimestamp older than 1 seconds, skipping
node/127.0.0.1 drained
has:evicting pod .*/test-pod-1
... skipping 14 lines ...
message:node/127.0.0.1 already uncordoned (server dry run)
has:already uncordoned
node-management.sh:161: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node/127.0.0.1 labeled
node-management.sh:166: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
Successful
message:error: cannot specify both a node name and a --selector option
See 'kubectl drain -h' for help and examples
has:cannot specify both a node name
node-management.sh:172: Successful get nodes 127.0.0.1 {{.metadata.labels.test}}: label
node-management.sh:174: Successful get nodes 127.0.0.1 {{.spec.unschedulable}}: <no value>
node-management.sh:176: Successful get pods {{range .items}}{{.metadata.name}},{{end}}: test-pod-1,test-pod-2,
Successful
... skipping 78 lines ...
Warning: deleting Pods that declare no controller: namespace-1679081365-20138/test-pod-1, namespace-1679081365-20138/test-pod-2
evicting pod namespace-1679081365-20138/test-pod-1 (dry run)
evicting pod namespace-1679081365-20138/test-pod-2 (dry run)
node/127.0.0.1 drained (dry run)
has:/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK
Successful
message:error: USAGE: cordon NODE [flags]
See 'kubectl cordon -h' for help and examples
has:error\: USAGE\: cordon NODE
node/127.0.0.1 already uncordoned
Successful
message:error: You must provide one or more resources by argument or filename.
Example resource specifications include:
   '-f rsrc.yaml'
   '--filename=rsrc.json'
   '<resource> <name>'
   '<resource>'
has:must provide one or more resources
... skipping 18 lines ...
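Annotation: the drain/cordon tests above cover server-side dry run, client-side dry run, and the usage errors. A sketch of the invocations, using the node and pods from the log (the --force flag is inferred from the "declare no controller" warning):

  kubectl drain 127.0.0.1 --dry-run=server --force   # evictions are only simulated; --force covers the uncontrolled pods
  kubectl drain 127.0.0.1 --selector=test=label      # error: cannot specify both a node name and a --selector option
  kubectl cordon                                     # error: USAGE: cordon NODE [flags]
  kubectl uncordon 127.0.0.1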
+++ [0317 19:30:03] Testing kubectl plugins
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/version/kubectl-version
  - warning: kubectl-version overwrites existing command: "kubectl version"
error: one plugin warning was found
has:kubectl-version overwrites existing command: "kubectl version"
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/kubectl-foo
test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo
  - warning: test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin: test/fixtures/pkg/kubectl/plugins/kubectl-foo
error: one plugin warning was found
has:test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin
Successful
message:The following compatible plugins are available:
test/fixtures/pkg/kubectl/plugins/kubectl-foo
has:plugins are available
Successful
message:Unable to read directory "test/fixtures/pkg/kubectl/plugins/empty" from your PATH: open test/fixtures/pkg/kubectl/plugins/empty: no such file or directory. Skipping...
error: unable to find any kubectl plugins in your PATH
has:unable to find any kubectl plugins in your PATH
Successful
message:I am plugin foo
has:plugin foo
Successful
message:I am plugin bar called with args test/fixtures/pkg/kubectl/plugins/bar/kubectl-bar arg1
... skipping 13 lines ...
+++ Running case: test-cmd.run_impersonation_tests
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_impersonation_tests
+++ [0317 19:30:03] Testing impersonation
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
Successful
message:error: requesting uid, groups or user-extra for test-admin without impersonating a user
has:without impersonating a user
certificatesigningrequest.certificates.k8s.io/foo created
authorization.sh:60: Successful get csr/foo {{.spec.username}}: user1
authorization.sh:61: Successful get csr/foo {{range .spec.groups}}{{.}}{{end}}: system:authenticated
certificatesigningrequest.certificates.k8s.io "foo" deleted
certificatesigningrequest.certificates.k8s.io/foo created
authorization.sh:66: Successful get csr/foo {{len .spec.groups}}: 4
authorization.sh:67: Successful get csr/foo {{range .spec.groups}}{{.}} {{end}}: group2 group1 ,,,chameleon system:authenticated
certificatesigningrequest.certificates.k8s.io "foo" deleted
certificatesigningrequest.certificates.k8s.io/foo created
authorization.sh:72: Successful get csr/foo {{.spec.username}}: user1
W0317 19:30:05.026085 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:30:05.026125 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
authorization.sh:73: Successful get csr/foo {{.spec.uid}}: abc123
certificatesigningrequest.certificates.k8s.io "foo" deleted
+++ exit code: 0
Recording: run_wait_tests
Running command: run_wait_tests
... skipping 8 lines ...
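Annotation: plugin discovery and impersonation are both client-side features. `kubectl plugin list` scans PATH for kubectl-* executables and warns when a plugin shadows a builtin or another plugin, and the impersonation errors above fire when a uid or group is requested without a user. Sketches (csr.yaml is a hypothetical manifest standing in for the test fixture):

  PATH=$PATH:test/fixtures/pkg/kubectl/plugins kubectl plugin list
  kubectl get csr --as-uid=abc123                                           # error: ... without impersonating a user
  kubectl create -f csr.yaml --as=user1 --as-group=group1 --as-uid=abc123   # succeeds; spec.username/uid reflect the flags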
I0317 19:30:05.441303 23215 event.go:307] "Event occurred" object="namespace-1679081405-25952/test-1" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-1-7697bf65f7 to 1"
I0317 19:30:05.493701 23215 event.go:307] "Event occurred" object="namespace-1679081405-25952/test-1-7697bf65f7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-1-7697bf65f7-lc7dm"
deployment.apps/test-2 created
I0317 19:30:05.528422 23215 event.go:307] "Event occurred" object="namespace-1679081405-25952/test-2" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set test-2-675f68f47d to 1"
I0317 19:30:05.559542 23215 event.go:307] "Event occurred" object="namespace-1679081405-25952/test-2-675f68f47d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: test-2-675f68f47d-8t8dm"
wait.sh:36: Successful get deployments {{range .items}}{{.metadata.name}},{{end}}: test-1,test-2,
W0317 19:30:14.553116 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:30:14.553153 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
Successful
message:error: timed out waiting for the condition on deployments/test-1
has:timed out
deployment.apps "test-1" deleted
deployment.apps "test-2" deleted
Successful
message:deployment.apps/test-1 condition met
deployment.apps/test-2 condition met
... skipping 68 lines ...
+++ working dir: /home/prow/go/src/k8s.io/kubernetes
+++ command: run_kubectl_debug_general_tests
+++ [0317 19:30:42] Creating namespace namespace-1679081442-20159
namespace/namespace-1679081442-20159 created
Context "test" modified.
+++ [0317 19:30:42] Testing kubectl debug profile general
W0317 19:30:42.972099 23215 reflector.go:533] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0317 19:30:42.972137 23215 reflector.go:148] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
pod/target created
debug.sh:140: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:
debug.sh:144: Successful get pod {{range.items}}{{.metadata.name}}:{{end}}: target:target-copy:
debug.sh:145: Successful get pod/target-copy {{range.spec.containers}}{{.name}}:{{end}}: target:debug-container:
debug.sh:146: Successful get pod/target-copy {{range.spec.containers}}{{.image}}:{{end}}: registry.k8s.io/nginx:1.7.9:busybox:
debug.sh:147: Successful get pod/target-copy {{range.spec.containers}}{{if (index . "livenessProbe")}}:{{end}}{{end}}:
... skipping 180 lines ...
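Annotation: the wait tests exercise both outcomes of `kubectl wait` (timeout vs. condition met), and the debug tests copy the target pod with an added container under the "general" profile. A sketch using names from the log (the timeout value here is illustrative, not the one the test used):

  kubectl wait --for=condition=Available --timeout=1s deployment/test-1   # error: timed out waiting for the condition
  kubectl debug target --image=busybox --container=debug-container --copy-to=target-copy --profile=general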