Result   | FAILURE
Tests    | 1 failed / 27 succeeded
Started  |
Elapsed  | 4h6m
Revision | release-1.7
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=capz\-e2e\s\[It\]\sRunning\sthe\sCluster\sAPI\sE2E\stests\sRunning\sthe\squick\-start\sspec\sShould\screate\sa\sworkload\scluster$'
[TIMEDOUT] A suite timeout occurred In [AfterEach] at: /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 @ 01/27/23 01:17:53.963 This is the Progress Report generated when the suite timeout occurred: Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster (Spec Runtime: 3h53m57.878s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:78 In [AfterEach] (Node Runtime: 3h38m28.711s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 At [By Step] Dumping logs from the "quick-start-ieam7e" workload cluster (Step Runtime: 3h38m28.711s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 Spec Goroutine goroutine 56441 [semacquire, 218 minutes] sync.runtime_Semacquire(0xc000996780?) /usr/local/go/src/runtime/sema.go:62 sync.(*WaitGroup).Wait(0x1461927?) /usr/local/go/src/sync/waitgroup.go:139 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc000bc2510, 0x6, 0x3ede0a7?}) /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc000784340, {0xc0038f6dc0, 0xf}, 0x1, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:156 | var errors []error | errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn))) > errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn))) > sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4305340, 0xc0000640b0}, {0x4315a38, 0xc00127e2a0}, 0xc00052cf10, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74 | hostname := getHostname(m, isAzureMachineWindows(am)) | > if err := collectLogsFromNode(cluster, hostname, isAzureMachineWindows(am), outputPath); err != nil { | errs = append(errs, err) | } sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).CollectWorkloadClusterLogs(0xc002c02780, {0x4305340?, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_proxy.go:265 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.(*AzureClusterProxy).CollectWorkloadClusterLogs(0xc001e89cf0, {0x4305340, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_clusterproxy.go:94 | func (acp *AzureClusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) { | Logf("Dumping workload cluster %s/%s logs", namespace, name) > acp.ClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, name, outputPath) | | aboveMachinesPath := strings.Replace(outputPath, "/machines", "", 1) > sigs.k8s.io/cluster-api/test/e2e.dumpSpecResourcesAndCleanup({0x4305340, 0xc0000640b0}, {0x3e41f96, 0xb}, {0x43179b0, 0xc001e89cf0}, {0xc002b22000, 0xf}, 0xc00280e2c0, 0xc0008da1b0, ...) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:70 | | // Dump all the logs from the workload cluster before deleting them. 
> clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) | | Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) > sigs.k8s.io/cluster-api/test/e2e.QuickStartSpec.func3() /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:111 | AfterEach(func() { | // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. > dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) | }) | } github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc0018510e0, 0xc0019732f0}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 Goroutines of Interest goroutine 57555 [chan receive, 218 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc001397cb0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc000898500?, {0x3e66c99?, 0xc0038f6dc0?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc000898500?, 0xc002e71d40?}, {0xc0038f6dc0?, 0xc002e71db0?}, {0x3e2ffee?, 0x203000?}, {0x42e3160, 0xc0016763f0}, {0x3e66c99, 0x19}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x157a2f1, 0x0}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4305340?, 0xc000064098?}, 0xc002e71d30?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x4305340, 0xc000064098}, 0xc000af8888, 0x22e7eea?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:662 k8s.io/apimachinery/pkg/util/wait.poll({0x4305340, 0xc000064098}, 0x58?, 0x22e6ca5?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:596 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4305340, 0xc000064098}, 0x1eb8025476?, 0xc000cf3ea8?, 0x1461927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xf06d37eae3f72aae?, 0xc000cf3ee8?, 0x1461927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x2f2ea88ab94e08fd?, 0xd0a2c6d191275a33?, 0xc0009d2690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 There were additional failures detected after the initial failure. These are visible in the timelinefrom junit.e2e_suite.1.xml
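The progress report above pins down the hang: goroutine 57555 has been parked in ssh.(*Session).Run inside execOnHost for 218 minutes, the surrounding retryWithTimeout is built on wait.PollImmediate, which only re-checks its timeout between invocations of the polled function and cannot interrupt one that never returns, and the spec goroutine (56441) waits in sync.WaitGroup.Wait via kind's AggregateConcurrent until Ginkgo's suite timeout fires. A minimal sketch of one way to bound such a blocking call is below; it is a hypothetical illustration, not the repository's fix, and runWithDeadline is an invented helper name.

```go
// Sketch (assumption, not CAPZ code): run a possibly-blocking collector step in
// its own goroutine and stop waiting for it after a deadline. wait.PollImmediate
// cannot do this on its own, because it only re-evaluates its timeout after the
// polled function returns.
package main

import (
	"context"
	"fmt"
	"time"
)

// runWithDeadline abandons fn after d. The goroutine running fn may keep
// running (as the SSH session did in this job); the caller simply stops
// blocking on it and can report a log-collection error instead.
func runWithDeadline(d time.Duration, fn func() error) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()

	done := make(chan error, 1)
	go func() { done <- fn() }()

	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return fmt.Errorf("gave up waiting for command: %w", ctx.Err())
	}
}

func main() {
	err := runWithDeadline(2*time.Second, func() error {
		time.Sleep(time.Hour) // stands in for an ssh.(*Session).Run call that never returns
		return nil
	})
	fmt.Println(err) // gave up waiting for command: context deadline exceeded
}
```

The trade-off is one leaked goroutine per abandoned command, which is usually acceptable in teardown code compared with spending the remaining 3h38m of the suite budget on a single node's logs.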
cluster.cluster.x-k8s.io/quick-start-ieam7e created azurecluster.infrastructure.cluster.x-k8s.io/quick-start-ieam7e created kubeadmcontrolplane.controlplane.cluster.x-k8s.io/quick-start-ieam7e-control-plane created azuremachinetemplate.infrastructure.cluster.x-k8s.io/quick-start-ieam7e-control-plane created machinedeployment.cluster.x-k8s.io/quick-start-ieam7e-md-0 created azuremachinetemplate.infrastructure.cluster.x-k8s.io/quick-start-ieam7e-md-0 created kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/quick-start-ieam7e-md-0 created machinedeployment.cluster.x-k8s.io/quick-start-ieam7e-md-win created azuremachinetemplate.infrastructure.cluster.x-k8s.io/quick-start-ieam7e-md-win created kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/quick-start-ieam7e-md-win created machinehealthcheck.cluster.x-k8s.io/quick-start-ieam7e-mhc-0 created clusterresourceset.addons.cluster.x-k8s.io/quick-start-ieam7e-calico-windows created azureclusteridentity.infrastructure.cluster.x-k8s.io/cluster-identity-sp created clusterresourceset.addons.cluster.x-k8s.io/csi-proxy created clusterresourceset.addons.cluster.x-k8s.io/containerd-logger-quick-start-ieam7e created configmap/cni-quick-start-ieam7e-calico-windows created configmap/csi-proxy-addon created configmap/containerd-logger-quick-start-ieam7e created felixconfiguration.crd.projectcalico.org/default configured > Enter [BeforeEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:52 @ 01/26/23 21:23:56.085 INFO: "" started at Thu, 26 Jan 2023 21:23:56 UTC on Ginkgo node 1 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml < Exit [BeforeEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:52 @ 01/26/23 21:23:56.141 (55ms) > Enter [BeforeEach] Running the quick-start spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:63 @ 01/26/23 21:23:56.141 STEP: Creating a namespace for hosting the "quick-start" test spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 @ 01/26/23 21:23:56.141 INFO: Creating namespace quick-start-1pj9f7 INFO: Creating event watcher for namespace "quick-start-1pj9f7" < Exit [BeforeEach] Running the quick-start spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:63 @ 01/26/23 21:23:56.184 (44ms) > Enter [It] Should create a workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:78 @ 01/26/23 21:23:56.184 STEP: Creating a workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:79 @ 01/26/23 21:23:56.184 INFO: Creating the workload cluster with name "quick-start-ieam7e" using the "(default)" template (Kubernetes v1.24.10, 1 control-plane machines, 1 worker machines) INFO: Getting the cluster template yaml INFO: clusterctl config cluster quick-start-ieam7e --infrastructure (default) --kubernetes-version v1.24.10 --control-plane-machine-count 1 --worker-machine-count 1 --flavor (default) INFO: Applying the cluster template yaml to the cluster INFO: Waiting for the cluster infrastructure to be provisioned STEP: Waiting for cluster to enter the provisioned phase - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_helpers.go:134 @ 01/26/23 21:24:01.611 INFO: Waiting for control plane to be initialized STEP: Installing Calico CNI via helm - 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:51 @ 01/26/23 21:26:01.758 STEP: Configuring calico CNI helm chart for IPv4 configuration - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:131 @ 01/26/23 21:26:01.758 Jan 26 21:27:32.258: INFO: getting history for release projectcalico Jan 26 21:27:32.319: INFO: Release projectcalico does not exist, installing it Jan 26 21:27:33.295: INFO: creating 1 resource(s) Jan 26 21:27:33.381: INFO: creating 1 resource(s) Jan 26 21:27:33.461: INFO: creating 1 resource(s) Jan 26 21:27:33.542: INFO: creating 1 resource(s) Jan 26 21:27:33.630: INFO: creating 1 resource(s) Jan 26 21:27:33.707: INFO: creating 1 resource(s) Jan 26 21:27:33.882: INFO: creating 1 resource(s) Jan 26 21:27:34.136: INFO: creating 1 resource(s) Jan 26 21:27:34.215: INFO: creating 1 resource(s) Jan 26 21:27:34.292: INFO: creating 1 resource(s) Jan 26 21:27:34.368: INFO: creating 1 resource(s) Jan 26 21:27:34.564: INFO: creating 1 resource(s) Jan 26 21:27:34.636: INFO: creating 1 resource(s) Jan 26 21:27:34.708: INFO: creating 1 resource(s) Jan 26 21:27:34.779: INFO: creating 1 resource(s) Jan 26 21:27:34.867: INFO: creating 1 resource(s) Jan 26 21:27:34.974: INFO: creating 1 resource(s) Jan 26 21:27:35.057: INFO: creating 1 resource(s) Jan 26 21:27:35.184: INFO: creating 1 resource(s) Jan 26 21:27:35.318: INFO: creating 1 resource(s) Jan 26 21:27:35.730: INFO: creating 1 resource(s) Jan 26 21:27:35.823: INFO: Clearing discovery cache Jan 26 21:27:35.823: INFO: beginning wait for 21 resources with timeout of 1m0s Jan 26 21:27:39.468: INFO: creating 1 resource(s) Jan 26 21:27:40.039: INFO: creating 6 resource(s) Jan 26 21:27:40.795: INFO: Install complete STEP: Waiting for Ready tigera-operator deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:60 @ 01/26/23 21:27:41.252 STEP: waiting for deployment tigera-operator/tigera-operator to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:27:41.5 Jan 26 21:27:41.500: INFO: starting to wait for deployment to become available Jan 26 21:27:51.622: INFO: Deployment tigera-operator/tigera-operator is now available, took 10.121173909s STEP: Waiting for Ready calico-system deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:74 @ 01/26/23 21:27:52.712 STEP: waiting for deployment calico-system/calico-kube-controllers to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:27:53.019 Jan 26 21:27:53.019: INFO: starting to wait for deployment to become available Jan 26 21:28:46.601: INFO: Deployment calico-system/calico-kube-controllers is now available, took 53.581894445s STEP: waiting for deployment calico-system/calico-typha to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:28:46.978 Jan 26 21:28:46.978: INFO: starting to wait for deployment to become available Jan 26 21:28:47.040: INFO: Deployment calico-system/calico-typha is now available, took 62.667617ms STEP: Waiting for Ready calico-apiserver deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:79 @ 01/26/23 21:28:47.04 STEP: waiting for deployment calico-apiserver/calico-apiserver to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:28:47.536 Jan 26 21:28:47.536: INFO: starting to wait for 
deployment to become available Jan 26 21:29:07.723: INFO: Deployment calico-apiserver/calico-apiserver is now available, took 20.187259038s STEP: Waiting for Ready calico-node daemonset pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:84 @ 01/26/23 21:29:07.723 STEP: waiting for daemonset calico-system/calico-node to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:29:08.031 Jan 26 21:29:08.031: INFO: waiting for daemonset calico-system/calico-node to be complete Jan 26 21:29:08.093: INFO: 1 daemonset calico-system/calico-node pods are running, took 61.963683ms STEP: Waiting for Ready calico windows pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:91 @ 01/26/23 21:29:08.093 STEP: waiting for daemonset calico-system/calico-node-windows to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:29:08.343 Jan 26 21:29:08.343: INFO: waiting for daemonset calico-system/calico-node-windows to be complete Jan 26 21:29:08.406: INFO: 0 daemonset calico-system/calico-node-windows pods are running, took 62.488859ms STEP: Waiting for Ready calico windows pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:97 @ 01/26/23 21:29:08.406 STEP: waiting for daemonset kube-system/kube-proxy-windows to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:29:08.707 Jan 26 21:29:08.707: INFO: waiting for daemonset kube-system/kube-proxy-windows to be complete Jan 26 21:29:08.767: INFO: 0 daemonset kube-system/kube-proxy-windows pods are running, took 60.063781ms INFO: Waiting for the first control plane machine managed by quick-start-1pj9f7/quick-start-ieam7e-control-plane to be provisioned STEP: Waiting for one control plane node to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:133 @ 01/26/23 21:29:08.791 STEP: Installing azure-disk CSI driver components via helm - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:71 @ 01/26/23 21:29:08.799 Jan 26 21:29:08.886: INFO: getting history for release azuredisk-csi-driver-oot Jan 26 21:29:08.948: INFO: Release azuredisk-csi-driver-oot does not exist, installing it Jan 26 21:29:11.777: INFO: creating 1 resource(s) Jan 26 21:29:12.043: INFO: creating 18 resource(s) Jan 26 21:29:12.619: INFO: Install complete STEP: Waiting for Ready csi-azuredisk-controller deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:81 @ 01/26/23 21:29:12.636 STEP: waiting for deployment kube-system/csi-azuredisk-controller to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:29:12.886 Jan 26 21:29:12.886: INFO: starting to wait for deployment to become available Jan 26 21:29:53.478: INFO: Deployment kube-system/csi-azuredisk-controller is now available, took 40.591424618s STEP: Waiting for Running azure-disk-csi node pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:86 @ 01/26/23 21:29:53.478 STEP: waiting for daemonset kube-system/csi-azuredisk-node to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:29:53.785 Jan 26 21:29:53.785: INFO: waiting for daemonset kube-system/csi-azuredisk-node to be complete Jan 26 21:30:13.970: INFO: 2 
daemonset kube-system/csi-azuredisk-node pods are running, took 20.185051198s STEP: waiting for daemonset kube-system/csi-azuredisk-node-win to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/26/23 21:30:14.276 Jan 26 21:30:14.276: INFO: waiting for daemonset kube-system/csi-azuredisk-node-win to be complete Jan 26 21:30:14.337: INFO: 0 daemonset kube-system/csi-azuredisk-node-win pods are running, took 60.65829ms INFO: Waiting for control plane to be ready INFO: Waiting for control plane quick-start-1pj9f7/quick-start-ieam7e-control-plane to be ready (implies underlying nodes to be ready as well) STEP: Waiting for the control plane to be ready - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:165 @ 01/26/23 21:30:14.352 STEP: Checking all the control plane machines are in the expected failure domains - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:196 @ 01/26/23 21:30:14.36 INFO: Waiting for the machine deployments to be provisioned STEP: Waiting for the workload nodes to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/machinedeployment_helpers.go:102 @ 01/26/23 21:30:14.39 STEP: Checking all the machines controlled by quick-start-ieam7e-md-0 are in the "<None>" failure domain - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/ginkgoextensions/output.go:35 @ 01/26/23 21:30:14.402 STEP: Waiting for the workload nodes to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/machinedeployment_helpers.go:102 @ 01/26/23 21:30:14.414 STEP: Checking all the machines controlled by quick-start-ieam7e-md-win are in the "<None>" failure domain - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/ginkgoextensions/output.go:35 @ 01/26/23 21:39:25.204 INFO: Waiting for the machine pools to be provisioned STEP: PASSED! 
- /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:106 @ 01/26/23 21:39:25.252 < Exit [It] Should create a workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:78 @ 01/26/23 21:39:25.252 (15m29.067s) > Enter [AfterEach] Running the quick-start spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 @ 01/26/23 21:39:25.252 STEP: Dumping logs from the "quick-start-ieam7e" workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 @ 01/26/23 21:39:25.252 Jan 26 21:39:25.252: INFO: Dumping workload cluster quick-start-1pj9f7/quick-start-ieam7e logs Jan 26 21:39:25.302: INFO: Collecting logs for Linux node quick-start-ieam7e-control-plane-fbv5q in cluster quick-start-ieam7e in namespace quick-start-1pj9f7 Jan 26 21:39:34.621: INFO: Collecting boot logs for AzureMachine quick-start-ieam7e-control-plane-fbv5q Jan 26 21:39:36.180: INFO: Collecting logs for Linux node quick-start-ieam7e-md-0-6jlch in cluster quick-start-ieam7e in namespace quick-start-1pj9f7 Jan 26 21:39:43.774: INFO: Collecting boot logs for AzureMachine quick-start-ieam7e-md-0-6jlch Jan 26 21:39:44.470: INFO: Collecting logs for Windows node quick-sta-vw7hw in cluster quick-start-ieam7e in namespace quick-start-1pj9f7 [TIMEDOUT] A suite timeout occurred In [AfterEach] at: /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 @ 01/27/23 01:17:53.963 This is the Progress Report generated when the suite timeout occurred: Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster (Spec Runtime: 3h53m57.878s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:78 In [AfterEach] (Node Runtime: 3h38m28.711s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 At [By Step] Dumping logs from the "quick-start-ieam7e" workload cluster (Step Runtime: 3h38m28.711s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 Spec Goroutine goroutine 56441 [semacquire, 218 minutes] sync.runtime_Semacquire(0xc000996780?) /usr/local/go/src/runtime/sema.go:62 sync.(*WaitGroup).Wait(0x1461927?) 
/usr/local/go/src/sync/waitgroup.go:139 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc000bc2510, 0x6, 0x3ede0a7?}) /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc000784340, {0xc0038f6dc0, 0xf}, 0x1, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:156 | var errors []error | errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn))) > errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn))) > sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4305340, 0xc0000640b0}, {0x4315a38, 0xc00127e2a0}, 0xc00052cf10, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74 | hostname := getHostname(m, isAzureMachineWindows(am)) | > if err := collectLogsFromNode(cluster, hostname, isAzureMachineWindows(am), outputPath); err != nil { | errs = append(errs, err) | } sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).CollectWorkloadClusterLogs(0xc002c02780, {0x4305340?, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_proxy.go:265 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.(*AzureClusterProxy).CollectWorkloadClusterLogs(0xc001e89cf0, {0x4305340, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_clusterproxy.go:94 | func (acp *AzureClusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) { | Logf("Dumping workload cluster %s/%s logs", namespace, name) > acp.ClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, name, outputPath) | | aboveMachinesPath := strings.Replace(outputPath, "/machines", "", 1) > sigs.k8s.io/cluster-api/test/e2e.dumpSpecResourcesAndCleanup({0x4305340, 0xc0000640b0}, {0x3e41f96, 0xb}, {0x43179b0, 0xc001e89cf0}, {0xc002b22000, 0xf}, 0xc00280e2c0, 0xc0008da1b0, ...) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:70 | | // Dump all the logs from the workload cluster before deleting them. > clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) | | Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) > sigs.k8s.io/cluster-api/test/e2e.QuickStartSpec.func3() /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:111 | AfterEach(func() { | // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
> dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) | }) | } github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc0018510e0, 0xc0019732f0}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 Goroutines of Interest goroutine 57555 [chan receive, 218 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc001397cb0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc000898500?, {0x3e66c99?, 0xc0038f6dc0?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc000898500?, 0xc002e71d40?}, {0xc0038f6dc0?, 0xc002e71db0?}, {0x3e2ffee?, 0x203000?}, {0x42e3160, 0xc0016763f0}, {0x3e66c99, 0x19}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x157a2f1, 0x0}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4305340?, 0xc000064098?}, 0xc002e71d30?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x4305340, 0xc000064098}, 0xc000af8888, 0x22e7eea?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:662 k8s.io/apimachinery/pkg/util/wait.poll({0x4305340, 0xc000064098}, 0x58?, 0x22e6ca5?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:596 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4305340, 0xc000064098}, 0x1eb8025476?, 0xc000cf3ea8?, 0x1461927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xf06d37eae3f72aae?, 0xc000cf3ee8?, 0x1461927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x2f2ea88ab94e08fd?, 0xd0a2c6d191275a33?, 0xc0009d2690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 < Exit [AfterEach] Running the quick-start spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 @ 01/27/23 01:17:53.983 (3h38m28.731s) > Enter [AfterEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/27/23 01:17:53.983 Jan 27 01:17:53.983: INFO: FAILED! Jan 27 01:17:53.983: INFO: Cleaning up after "Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster" spec STEP: Redacting sensitive information from logs - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:212 @ 01/27/23 01:17:53.983 [TIMEDOUT] A grace period timeout occurred In [AfterEach] at: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/27/23 01:18:23.984 This is the Progress Report generated when the grace period timeout occurred: Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster (Spec Runtime: 3h54m27.898s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:78 In [AfterEach] (Node Runtime: 30.001s) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 At [By Step] Redacting sensitive information from logs (Step Runtime: 30s) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:212 Spec Goroutine goroutine 56441 [semacquire, 218 minutes] sync.runtime_Semacquire(0xc000996780?) /usr/local/go/src/runtime/sema.go:62 sync.(*WaitGroup).Wait(0x1461927?) 
/usr/local/go/src/sync/waitgroup.go:139 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc000bc2510, 0x6, 0x3ede0a7?}) /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc000784340, {0xc0038f6dc0, 0xf}, 0x1, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:156 | var errors []error | errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn))) > errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn))) > sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4305340, 0xc0000640b0}, {0x4315a38, 0xc00127e2a0}, 0xc00052cf10, {0xc000ab8060, 0x5e}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74 | hostname := getHostname(m, isAzureMachineWindows(am)) | > if err := collectLogsFromNode(cluster, hostname, isAzureMachineWindows(am), outputPath); err != nil { | errs = append(errs, err) | } sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).CollectWorkloadClusterLogs(0xc002c02780, {0x4305340?, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_proxy.go:265 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.(*AzureClusterProxy).CollectWorkloadClusterLogs(0xc001e89cf0, {0x4305340, 0xc0000640b0}, {0xc001f28930, 0x12}, {0xc001f28900, 0x12}, {0xc000ab7a10, 0x2b}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_clusterproxy.go:94 | func (acp *AzureClusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) { | Logf("Dumping workload cluster %s/%s logs", namespace, name) > acp.ClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, name, outputPath) | | aboveMachinesPath := strings.Replace(outputPath, "/machines", "", 1) > sigs.k8s.io/cluster-api/test/e2e.dumpSpecResourcesAndCleanup({0x4305340, 0xc0000640b0}, {0x3e41f96, 0xb}, {0x43179b0, 0xc001e89cf0}, {0xc002b22000, 0xf}, 0xc00280e2c0, 0xc0008da1b0, ...) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:70 | | // Dump all the logs from the workload cluster before deleting them. > clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) | | Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) > sigs.k8s.io/cluster-api/test/e2e.QuickStartSpec.func3() /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:111 | AfterEach(func() { | // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
> dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) | }) | } github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc0018510e0, 0xc0019732f0}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 Goroutines of Interest goroutine 57555 [chan receive, 218 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc001397cb0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc000898500?, {0x3e66c99?, 0xc0038f6dc0?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc000898500?, 0xc002e71d40?}, {0xc0038f6dc0?, 0xc002e71db0?}, {0x3e2ffee?, 0x203000?}, {0x42e3160, 0xc0016763f0}, {0x3e66c99, 0x19}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x157a2f1, 0x0}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4305340?, 0xc000064098?}, 0xc002e71d30?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x4305340, 0xc000064098}, 0xc000af8888, 0x22e7eea?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:662 k8s.io/apimachinery/pkg/util/wait.poll({0x4305340, 0xc000064098}, 0x58?, 0x22e6ca5?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:596 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4305340, 0xc000064098}, 0x1eb8025476?, 0xc000cf3ea8?, 0x1461927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xf06d37eae3f72aae?, 0xc000cf3ee8?, 0x1461927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x2f2ea88ab94e08fd?, 0xd0a2c6d191275a33?, 0xc0009d2690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 68653 [syscall, 2 minutes] syscall.Syscall6(0x1?, 0x1?, 0xffffffffffffffff?, 0x37e0c20?, 0x24?, 0x0?, 0x8?) /usr/local/go/src/syscall/syscall_linux.go:90 os.(*Process).blockUntilWaitable(0xc0028e0b10) /usr/local/go/src/os/wait_waitid.go:32 os.(*Process).wait(0xc0028e0b10) /usr/local/go/src/os/exec_unix.go:22 os.(*Process).Wait(...) /usr/local/go/src/os/exec.go:132 os/exec.(*Cmd).Wait(0xc0028afb80) /usr/local/go/src/os/exec/exec.go:599 os/exec.(*Cmd).Run(0xc0024f5310?) /usr/local/go/src/os/exec/exec.go:437 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.redactLogs() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:216 | //nolint:gosec // Ignore warning about running a command constructed from user input | cmd := exec.Command(e2eConfig.GetVariable(RedactLogScriptPath)) > if err := cmd.Run(); err != nil { | LogWarningf("Redact logs command failed: %v", err) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.glob..func2.2() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:99 | AfterEach(func() { | CheckTestBeforeCleanup() > redactLogs() | | Expect(os.Unsetenv(AzureResourceGroup)).To(Succeed()) github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x1bcb8be, 0xc00280af00}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 < Exit [AfterEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/27/23 01:18:24 (30.017s)
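Both the suite timeout and the grace-period timeout above bottom out in kinderrors.AggregateConcurrent, which fans the per-node collector functions out into goroutines and then waits for all of them, so a single stuck SSH command holds the entire [AfterEach]. The sketch below shows that fan-out/aggregate shape under stated assumptions: it is not kind's actual implementation, aggregateConcurrent is an illustrative name, and errors.Join requires Go 1.20+.

```go
// Sketch of the concurrent-aggregation pattern visible in the Spec Goroutine:
// one goroutine per collector, a WaitGroup to join them, and the errors merged
// at the end. If any collector blocks forever, wg.Wait() blocks too, which is
// where goroutine 56441 sat for 218 minutes.
package main

import (
	"errors"
	"fmt"
	"sync"
)

func aggregateConcurrent(fns []func() error) error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, fn := range fns {
		fn := fn
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := fn(); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}()
	}
	wg.Wait() // the AfterEach cannot finish until every collector returns
	return errors.Join(errs...)
}

func main() {
	err := aggregateConcurrent([]func() error{
		func() error { return nil },
		func() error { return errors.New(`running command "Get-Content C:\cni.log": exit status 1`) },
	})
	fmt.Println(err)
}
```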
capz-e2e [It] Running the Cluster API E2E tests Running the MachineDeployment rollout spec Should successfully upgrade Machines upon changes in relevant MachineDeployment fields
capz-e2e [It] Running the Cluster API E2E tests Running the self-hosted spec Should pivot the bootstrap cluster to a self-hosted cluster
capz-e2e [It] Running the Cluster API E2E tests Should successfully exercise machine pools Should successfully create a cluster with machine pool machines
capz-e2e [It] Running the Cluster API E2E tests Should successfully remediate unhealthy machines with MachineHealthCheck Should successfully trigger KCP remediation
capz-e2e [It] Running the Cluster API E2E tests Should successfully remediate unhealthy machines with MachineHealthCheck Should successfully trigger machine deployment remediation
capz-e2e [It] Running the Cluster API E2E tests Should successfully scale out and scale in a MachineDeployment Should successfully scale a MachineDeployment up and down upon changes to the MachineDeployment replica count
capz-e2e [It] Running the Cluster API E2E tests Should successfully set and use node drain timeout A node should be forcefully removed if it cannot be drained in time
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [It] Conformance Tests conformance-tests
capz-e2e [It] Running the Cluster API E2E tests API Version Upgrade upgrade from v1alpha4 to v1beta1, and scale workload clusters created in v1alpha4 Should create a management cluster and then upgrade all the providers
capz-e2e [It] Running the Cluster API E2E tests Running KCP upgrade in a HA cluster [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Running the Cluster API E2E tests Running KCP upgrade in a HA cluster using scale in rollout [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Running the Cluster API E2E tests Running the workload cluster upgrade spec [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Workload cluster creation Creating a GPU-enabled cluster [OPTIONAL] with a single control plane node and 1 node
capz-e2e [It] Workload cluster creation Creating a VMSS cluster [REQUIRED] with a single control plane node and an AzureMachinePool with 2 Linux and 2 Windows worker nodes
capz-e2e [It] Workload cluster creation Creating a cluster that uses the external cloud provider and external azurediskcsi driver [OPTIONAL] with a 1 control plane nodes and 2 worker nodes
capz-e2e [It] Workload cluster creation Creating a cluster that uses the external cloud provider and machinepools [OPTIONAL] with 1 control plane node and 1 machinepool
capz-e2e [It] Workload cluster creation Creating a dual-stack cluster [OPTIONAL] With dual-stack worker node
capz-e2e [It] Workload cluster creation Creating a highly available cluster [REQUIRED] With 3 control-plane nodes and 2 Linux and 2 Windows worker nodes
capz-e2e [It] Workload cluster creation Creating a ipv6 control-plane cluster [REQUIRED] With ipv6 worker node
capz-e2e [It] Workload cluster creation Creating a private cluster [OPTIONAL] Creates a public management cluster in a custom vnet
capz-e2e [It] Workload cluster creation Creating an AKS cluster [EXPERIMENTAL][Managed Kubernetes] with a single control plane node and 1 node
capz-e2e [It] Workload cluster creation Creating clusters using clusterclass [OPTIONAL] with a single control plane node, one linux worker node, and one windows worker node
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=external CCM=external AzureDiskCSIMigration=true: upgrade to v1.23 should create volumes dynamically with out-of-tree cloud provider
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=external CCM=internal AzureDiskCSIMigration=true: upgrade to v1.23 should create volumes dynamically with intree cloud provider
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=internal CCM=internal AzureDiskCSIMigration=false: upgrade to v1.23 should create volumes dynamically with intree cloud provider
... skipping 754 lines ... Jan 26 21:33:36.782: INFO: Describing Pod calico-system/csi-node-driver-84dtn Jan 26 21:33:36.782: INFO: Creating log watcher for controller calico-system/csi-node-driver-84dtn, container csi-node-driver-registrar Jan 26 21:33:36.783: INFO: Creating log watcher for controller calico-system/csi-node-driver-84dtn, container calico-csi Jan 26 21:33:37.099: INFO: Describing Pod calico-system/csi-node-driver-tvkpz Jan 26 21:33:37.099: INFO: Creating log watcher for controller calico-system/csi-node-driver-tvkpz, container calico-csi Jan 26 21:33:37.099: INFO: Creating log watcher for controller calico-system/csi-node-driver-tvkpz, container csi-node-driver-registrar Jan 26 21:33:37.170: INFO: Error starting logs stream for pod calico-system/csi-node-driver-tvkpz, container calico-csi: container "calico-csi" in pod "csi-node-driver-tvkpz" is waiting to start: ContainerCreating Jan 26 21:33:37.170: INFO: Error starting logs stream for pod calico-system/csi-node-driver-tvkpz, container csi-node-driver-registrar: container "csi-node-driver-registrar" in pod "csi-node-driver-tvkpz" is waiting to start: ContainerCreating Jan 26 21:33:37.500: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-5w487, container coredns Jan 26 21:33:37.500: INFO: Describing Pod kube-system/coredns-57575c5f89-5w487 Jan 26 21:33:37.901: INFO: Describing Pod kube-system/coredns-57575c5f89-k9x24 Jan 26 21:33:37.901: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-k9x24, container coredns Jan 26 21:33:38.303: INFO: Describing Pod kube-system/csi-azuredisk-controller-545d478dbf-pz9zw Jan 26 21:33:38.303: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-pz9zw, container csi-snapshotter ... skipping 45 lines ... [38;5;243m------------------------------[0m [38;5;10m• [1021.633 seconds][0m [0mRunning the Cluster API E2E tests [38;5;243mRunning the self-hosted spec [38;5;10m[1mShould pivot the bootstrap cluster to a self-hosted cluster[0m [38;5;243m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_selfhosted.go:108[0m [38;5;243mCaptured StdOut/StdErr Output >>[0m 2023/01/26 21:23:56 failed trying to get namespace (self-hosted):namespaces "self-hosted" not found kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/self-hosted-qejktc-md-0 created cluster.cluster.x-k8s.io/self-hosted-qejktc created machinedeployment.cluster.x-k8s.io/self-hosted-qejktc-md-0 created kubeadmcontrolplane.controlplane.cluster.x-k8s.io/self-hosted-qejktc-control-plane created azurecluster.infrastructure.cluster.x-k8s.io/self-hosted-qejktc created azureclusteridentity.infrastructure.cluster.x-k8s.io/cluster-identity-sp created ... skipping 207 lines ... 
Jan 26 21:34:31.133: INFO: Fetching activity logs took 4.370196626s Jan 26 21:34:31.133: INFO: Dumping all the Cluster API resources in the "self-hosted" namespace Jan 26 21:34:31.457: INFO: Deleting all clusters in the self-hosted namespace [1mSTEP:[0m Deleting cluster self-hosted-qejktc [38;5;243m@ 01/26/23 21:34:31.477[0m INFO: Waiting for the Cluster self-hosted/self-hosted-qejktc to be deleted [1mSTEP:[0m Waiting for cluster self-hosted-qejktc to be deleted [38;5;243m@ 01/26/23 21:34:31.485[0m INFO: Got error while streaming logs for pod capi-kubeadm-control-plane-system/capi-kubeadm-control-plane-controller-manager-6bc947c55b-n7f6h, container manager: http2: client connection lost INFO: Got error while streaming logs for pod capz-system/capz-controller-manager-6ddcdc5c9d-vs5j6, container manager: http2: client connection lost INFO: Got error while streaming logs for pod capi-kubeadm-bootstrap-system/capi-kubeadm-bootstrap-controller-manager-8c96b57bb-qgg2h, container manager: http2: client connection lost INFO: Got error while streaming logs for pod capi-system/capi-controller-manager-5dc86f6d4d-klc8g, container manager: http2: client connection lost Jan 26 21:39:11.648: INFO: Deleting namespace used for hosting the "self-hosted" test spec INFO: Deleting namespace self-hosted Jan 26 21:39:11.671: INFO: Checking if any resources are left over in Azure for spec "self-hosted" [1mSTEP:[0m Redacting sensitive information from logs [38;5;243m@ 01/26/23 21:39:12.461[0m Jan 26 21:39:49.297: INFO: Cleaning up after "Running the Cluster API E2E tests Running the self-hosted spec Should pivot the bootstrap cluster to a self-hosted cluster" spec [1mSTEP:[0m Redacting sensitive information from logs [38;5;243m@ 01/26/23 21:39:49.297[0m ... skipping 165 lines ... 
Jan 26 21:39:09.722: INFO: Describing Pod calico-system/csi-node-driver-s7d4s Jan 26 21:39:09.722: INFO: Creating log watcher for controller calico-system/csi-node-driver-s7d4s, container calico-csi Jan 26 21:39:09.722: INFO: Creating log watcher for controller calico-system/csi-node-driver-s7d4s, container csi-node-driver-registrar Jan 26 21:39:10.121: INFO: Creating log watcher for controller calico-system/csi-node-driver-vqh2d, container calico-csi Jan 26 21:39:10.121: INFO: Describing Pod calico-system/csi-node-driver-vqh2d Jan 26 21:39:10.121: INFO: Creating log watcher for controller calico-system/csi-node-driver-vqh2d, container csi-node-driver-registrar Jan 26 21:39:10.199: INFO: Error starting logs stream for pod calico-system/csi-node-driver-vqh2d, container csi-node-driver-registrar: container "csi-node-driver-registrar" in pod "csi-node-driver-vqh2d" is waiting to start: ContainerCreating Jan 26 21:39:10.200: INFO: Error starting logs stream for pod calico-system/csi-node-driver-vqh2d, container calico-csi: container "calico-csi" in pod "csi-node-driver-vqh2d" is waiting to start: ContainerCreating Jan 26 21:39:10.523: INFO: Describing Pod calico-system/csi-node-driver-vv5cx Jan 26 21:39:10.524: INFO: Creating log watcher for controller calico-system/csi-node-driver-vv5cx, container calico-csi Jan 26 21:39:10.524: INFO: Creating log watcher for controller calico-system/csi-node-driver-vv5cx, container csi-node-driver-registrar Jan 26 21:39:10.922: INFO: Describing Pod kube-system/coredns-57575c5f89-lbvj6 Jan 26 21:39:10.922: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-lbvj6, container coredns Jan 26 21:39:11.323: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-nw5pn, container coredns ... skipping 206 lines ... 
Jan 26 21:41:22.262: INFO: Creating log watcher for controller calico-system/calico-kube-controllers-594d54f99-dbm9h, container calico-kube-controllers Jan 26 21:41:22.262: INFO: Describing Pod calico-system/calico-kube-controllers-594d54f99-dbm9h Jan 26 21:41:22.408: INFO: Creating log watcher for controller calico-system/calico-node-rs8j7, container calico-node Jan 26 21:41:22.408: INFO: Describing Pod calico-system/calico-node-rs8j7 Jan 26 21:41:22.611: INFO: Creating log watcher for controller calico-system/calico-node-x9bdr, container calico-node Jan 26 21:41:22.611: INFO: Describing Pod calico-system/calico-node-x9bdr Jan 26 21:41:22.673: INFO: Error starting logs stream for pod calico-system/calico-node-x9bdr, container calico-node: pods "node-drain-xy2o90-control-plane-p8xhk" not found Jan 26 21:41:22.736: INFO: Creating log watcher for controller calico-system/calico-typha-5d7f599d89-v765r, container calico-typha Jan 26 21:41:22.737: INFO: Describing Pod calico-system/calico-typha-5d7f599d89-v765r Jan 26 21:41:22.863: INFO: Creating log watcher for controller calico-system/csi-node-driver-94xfb, container calico-csi Jan 26 21:41:22.863: INFO: Describing Pod calico-system/csi-node-driver-94xfb Jan 26 21:41:22.864: INFO: Creating log watcher for controller calico-system/csi-node-driver-94xfb, container csi-node-driver-registrar Jan 26 21:41:22.926: INFO: Error starting logs stream for pod calico-system/csi-node-driver-94xfb, container csi-node-driver-registrar: pods "node-drain-xy2o90-control-plane-p8xhk" not found Jan 26 21:41:22.927: INFO: Error starting logs stream for pod calico-system/csi-node-driver-94xfb, container calico-csi: pods "node-drain-xy2o90-control-plane-p8xhk" not found Jan 26 21:41:23.113: INFO: Creating log watcher for controller calico-system/csi-node-driver-tllnr, container calico-csi Jan 26 21:41:23.113: INFO: Creating log watcher for controller calico-system/csi-node-driver-tllnr, container csi-node-driver-registrar Jan 26 21:41:23.114: INFO: Describing Pod calico-system/csi-node-driver-tllnr Jan 26 21:41:23.512: INFO: Describing Pod kube-system/coredns-57575c5f89-kht4v Jan 26 21:41:23.512: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-kht4v, container coredns Jan 26 21:41:23.912: INFO: Describing Pod kube-system/coredns-57575c5f89-xzp7x ... skipping 10 lines ... 
Jan 26 21:41:24.711: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-lxb89, container azuredisk
Jan 26 21:41:24.712: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-lxb89, container liveness-probe
Jan 26 21:41:25.115: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-zq4pl, container liveness-probe
Jan 26 21:41:25.115: INFO: Describing Pod kube-system/csi-azuredisk-node-zq4pl
Jan 26 21:41:25.115: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-zq4pl, container node-driver-registrar
Jan 26 21:41:25.116: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-zq4pl, container azuredisk
Jan 26 21:41:25.179: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-zq4pl, container azuredisk: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:25.179: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-zq4pl, container node-driver-registrar: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:25.179: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-zq4pl, container liveness-probe: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:25.511: INFO: Describing Pod kube-system/etcd-node-drain-xy2o90-control-plane-p8xhk
Jan 26 21:41:25.511: INFO: Creating log watcher for controller kube-system/etcd-node-drain-xy2o90-control-plane-p8xhk, container etcd
Jan 26 21:41:25.573: INFO: Error starting logs stream for pod kube-system/etcd-node-drain-xy2o90-control-plane-p8xhk, container etcd: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:25.910: INFO: Describing Pod kube-system/etcd-node-drain-xy2o90-control-plane-sdh54
Jan 26 21:41:25.910: INFO: Creating log watcher for controller kube-system/etcd-node-drain-xy2o90-control-plane-sdh54, container etcd
Jan 26 21:41:26.311: INFO: Describing Pod kube-system/kube-apiserver-node-drain-xy2o90-control-plane-p8xhk
Jan 26 21:41:26.311: INFO: Creating log watcher for controller kube-system/kube-apiserver-node-drain-xy2o90-control-plane-p8xhk, container kube-apiserver
Jan 26 21:41:26.374: INFO: Error starting logs stream for pod kube-system/kube-apiserver-node-drain-xy2o90-control-plane-p8xhk, container kube-apiserver: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:26.711: INFO: Describing Pod kube-system/kube-apiserver-node-drain-xy2o90-control-plane-sdh54
Jan 26 21:41:26.711: INFO: Creating log watcher for controller kube-system/kube-apiserver-node-drain-xy2o90-control-plane-sdh54, container kube-apiserver
Jan 26 21:41:27.111: INFO: Describing Pod kube-system/kube-controller-manager-node-drain-xy2o90-control-plane-p8xhk
Jan 26 21:41:27.111: INFO: Creating log watcher for controller kube-system/kube-controller-manager-node-drain-xy2o90-control-plane-p8xhk, container kube-controller-manager
Jan 26 21:41:27.178: INFO: Error starting logs stream for pod kube-system/kube-controller-manager-node-drain-xy2o90-control-plane-p8xhk, container kube-controller-manager: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:27.511: INFO: Describing Pod kube-system/kube-controller-manager-node-drain-xy2o90-control-plane-sdh54
Jan 26 21:41:27.511: INFO: Creating log watcher for controller kube-system/kube-controller-manager-node-drain-xy2o90-control-plane-sdh54, container kube-controller-manager
Jan 26 21:41:27.911: INFO: Describing Pod kube-system/kube-proxy-b76b9
Jan 26 21:41:27.912: INFO: Creating log watcher for controller kube-system/kube-proxy-b76b9, container kube-proxy
Jan 26 21:41:28.313: INFO: Creating log watcher for controller kube-system/kube-proxy-tw22r, container kube-proxy
Jan 26 21:41:28.313: INFO: Describing Pod kube-system/kube-proxy-tw22r
Jan 26 21:41:28.376: INFO: Error starting logs stream for pod kube-system/kube-proxy-tw22r, container kube-proxy: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:28.710: INFO: Describing Pod kube-system/kube-scheduler-node-drain-xy2o90-control-plane-p8xhk
Jan 26 21:41:28.710: INFO: Creating log watcher for controller kube-system/kube-scheduler-node-drain-xy2o90-control-plane-p8xhk, container kube-scheduler
Jan 26 21:41:28.774: INFO: Error starting logs stream for pod kube-system/kube-scheduler-node-drain-xy2o90-control-plane-p8xhk, container kube-scheduler: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:29.111: INFO: Describing Pod kube-system/kube-scheduler-node-drain-xy2o90-control-plane-sdh54
Jan 26 21:41:29.111: INFO: Creating log watcher for controller kube-system/kube-scheduler-node-drain-xy2o90-control-plane-sdh54, container kube-scheduler
Jan 26 21:41:29.511: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-42l6b
Jan 26 21:41:29.511: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-42l6b, container web
Jan 26 21:41:29.912: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-bxfh8
Jan 26 21:41:29.912: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-bxfh8, container web
Jan 26 21:41:30.311: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-gjklj, container web
Jan 26 21:41:30.311: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-gjklj
Jan 26 21:41:30.716: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-gn9sz
Jan 26 21:41:30.716: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-gn9sz, container web
Jan 26 21:41:30.778: INFO: Error starting logs stream for pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-969-6f8c44cbdd-gn9sz, container web: pods "node-drain-xy2o90-control-plane-p8xhk" not found
Jan 26 21:41:31.115: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-mh847, container web
Jan 26 21:41:31.115: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-mh847
Jan 26 21:41:31.511: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-tgknz
Jan 26 21:41:31.511: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-tgknz, container web
Jan 26 21:41:31.911: INFO: Describing Pod node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-w6vc8
Jan 26 21:41:31.911: INFO: Creating log watcher for controller node-drain-jz0lxm-unevictable-workload/unevictable-pod-9la-8598949b8b-w6vc8, container web
... skipping 43 lines ...
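The "Creating log watcher" / "Error starting logs stream ... not found" pairs above come from the suite opening follow-mode log streams for every container while the node-drain spec is deleting the node that hosted them. A minimal client-go sketch of that kind of watcher follows; the function name streamContainerLogs and the output-file layout are illustrative assumptions, not the capz helper itself.

```go
// Sketch only: open a follow-mode log stream for one container and copy it to a
// file. When the backing pod/node is already gone, Stream() fails, which is the
// point where the "Error starting logs stream ... not found" lines appear.
package logstream

import (
	"context"
	"io"
	"os"
	"path/filepath"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

func streamContainerLogs(ctx context.Context, cs kubernetes.Interface, namespace, pod, container, outputDir string) error {
	req := cs.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{
		Container: container,
		Follow:    true, // keep streaming until the pod goes away or ctx is cancelled
	})
	stream, err := req.Stream(ctx)
	if err != nil {
		return err // e.g. pods "node-drain-...-p8xhk" not found
	}
	defer stream.Close()

	f, err := os.Create(filepath.Join(outputDir, pod+"-"+container+".log"))
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, stream)
	return err
}
```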
configmap/cni-md-scale-3plpcu-calico-windows created
configmap/csi-proxy-addon created
configmap/containerd-logger-md-scale-3plpcu created
felixconfiguration.crd.projectcalico.org/default created
Failed to get logs for Machine md-scale-3plpcu-md-win-cbc947b67-gf8fr, Cluster md-scale-re8xyj/md-scale-3plpcu: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1]
Failed to get logs for Machine md-scale-3plpcu-md-win-cbc947b67-j4fxk, Cluster md-scale-re8xyj/md-scale-3plpcu: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1]
<< Captured StdOut/StdErr Output
Timeline >>
INFO: "" started at Thu, 26 Jan 2023 21:23:56 UTC on Ginkgo node 4 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml
STEP: Creating a namespace for hosting the "md-scale" test spec @ 01/26/23 21:23:56.25
INFO: Creating namespace md-scale-re8xyj
... skipping 241 lines ...
configmap/cni-md-rollout-phn6k7-calico-windows created
configmap/csi-proxy-addon created
configmap/containerd-logger-md-rollout-phn6k7 created
felixconfiguration.crd.projectcalico.org/default configured
Failed to get logs for Machine md-rollout-phn6k7-md-win-6748999595-nggcn, Cluster md-rollout-a01ojn/md-rollout-phn6k7: [dialing from control plane to target node at md-rollou-frc2g: ssh: rejected: connect failed (Temporary failure in name resolution), Unable to collect VM Boot Diagnostic logs: failed to get boot diagnostics data: compute.VirtualMachinesClient#RetrieveBootDiagnosticsData: Failure responding to request: StatusCode=404 -- Original Error: autorest/azure: Service returned an error. Status=404 Code="ResourceNotFound" Message="The Resource 'Microsoft.Compute/virtualMachines/md-rollou-frc2g' under resource group 'capz-e2e-v8ru69' was not found. For more details please go to https://aka.ms/ARMResourceNotFoundFix"]
Failed to get logs for Machine md-rollout-phn6k7-md-win-6748999595-qpmrp, Cluster md-rollout-a01ojn/md-rollout-phn6k7: azuremachines.infrastructure.cluster.x-k8s.io "md-rollout-phn6k7-md-win-5xzf2" not found
Failed to get logs for Machine md-rollout-phn6k7-md-win-b96d568bd-vrmnb, Cluster md-rollout-a01ojn/md-rollout-phn6k7: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1]
<< Captured StdOut/StdErr Output
Timeline >>
INFO: "" started at Thu, 26 Jan 2023 21:23:56 UTC on Ginkgo node 9 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml
STEP: Creating a namespace for hosting the "md-rollout" test spec @ 01/26/23 21:23:56.23
INFO: Creating namespace md-rollout-a01ojn
... skipping 375 lines ...
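The bracketed messages above ("Failed to get logs for Machine ...: [running command ..., running command ...]") are aggregated errors: each remote collection command for a Windows node runs in its own goroutine and the non-nil results are joined into one error. Below is a rough stand-in for that pattern, assuming behavior equivalent to sigs.k8s.io/kind's errors.AggregateConcurrent as used in the stack frames later in this log; the helper name runAllConcurrently and the exact formatting are made up.

```go
// Sketch of the error-aggregation pattern behind the bracketed "Failed to get
// logs ..." messages: run every collection step concurrently, wait for all of
// them, and report whatever failed as a single combined error.
package collect

import (
	"fmt"
	"strings"
	"sync"
)

func runAllConcurrently(tasks []func() error) error {
	var (
		mu   sync.Mutex
		errs []string
		wg   sync.WaitGroup
	)
	for _, task := range tasks {
		wg.Add(1)
		go func(fn func() error) {
			defer wg.Done()
			if err := fn(); err != nil {
				mu.Lock()
				errs = append(errs, err.Error())
				mu.Unlock()
			}
		}(task)
	}
	wg.Wait() // the same kind of Wait the timed-out goroutine below is parked in

	if len(errs) == 0 {
		return nil
	}
	// Render the "[err1, err2]" shape seen in the log above.
	return fmt.Errorf("[%s]", strings.Join(errs, ", "))
}
```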
Jan 26 21:54:01.060: INFO: Creating log watcher for controller calico-apiserver/calico-apiserver-5d5ffc8fc5-qnz64, container calico-apiserver
Jan 26 21:54:01.060: INFO: Describing Pod calico-apiserver/calico-apiserver-5d5ffc8fc5-qnz64
Jan 26 21:54:01.193: INFO: Creating log watcher for controller calico-system/calico-kube-controllers-594d54f99-fgz26, container calico-kube-controllers
Jan 26 21:54:01.193: INFO: Describing Pod calico-system/calico-kube-controllers-594d54f99-fgz26
Jan 26 21:54:01.341: INFO: Creating log watcher for controller calico-system/calico-node-qmxpz, container calico-node
Jan 26 21:54:01.342: INFO: Describing Pod calico-system/calico-node-qmxpz
Jan 26 21:54:01.413: INFO: Error starting logs stream for pod calico-system/calico-node-qmxpz, container calico-node: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:01.488: INFO: Creating log watcher for controller calico-system/calico-node-qng6j, container calico-node
Jan 26 21:54:01.489: INFO: Describing Pod calico-system/calico-node-qng6j
Jan 26 21:54:01.721: INFO: Creating log watcher for controller calico-system/calico-node-windows-n9dc9, container calico-node-felix
Jan 26 21:54:01.721: INFO: Describing Pod calico-system/calico-node-windows-n9dc9
Jan 26 21:54:01.723: INFO: Creating log watcher for controller calico-system/calico-node-windows-n9dc9, container calico-node-startup
Jan 26 21:54:01.783: INFO: Error starting logs stream for pod calico-system/calico-node-windows-n9dc9, container calico-node-felix: pods "win-p-win000002" not found
Jan 26 21:54:01.786: INFO: Error starting logs stream for pod calico-system/calico-node-windows-n9dc9, container calico-node-startup: pods "win-p-win000002" not found
Jan 26 21:54:01.849: INFO: Creating log watcher for controller calico-system/calico-typha-59bbd675cc-kq58g, container calico-typha
Jan 26 21:54:01.849: INFO: Describing Pod calico-system/calico-typha-59bbd675cc-kq58g
Jan 26 21:54:02.060: INFO: Describing Pod calico-system/csi-node-driver-596bb
Jan 26 21:54:02.060: INFO: Creating log watcher for controller calico-system/csi-node-driver-596bb, container calico-csi
Jan 26 21:54:02.060: INFO: Creating log watcher for controller calico-system/csi-node-driver-596bb, container csi-node-driver-registrar
Jan 26 21:54:02.127: INFO: Error starting logs stream for pod calico-system/csi-node-driver-596bb, container calico-csi: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:02.127: INFO: Error starting logs stream for pod calico-system/csi-node-driver-596bb, container csi-node-driver-registrar: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:02.460: INFO: Describing Pod calico-system/csi-node-driver-mrzzr
Jan 26 21:54:02.460: INFO: Creating log watcher for controller calico-system/csi-node-driver-mrzzr, container calico-csi
Jan 26 21:54:02.460: INFO: Creating log watcher for controller calico-system/csi-node-driver-mrzzr, container csi-node-driver-registrar
Jan 26 21:54:02.859: INFO: Describing Pod kube-system/containerd-logger-t26tl
Jan 26 21:54:02.859: INFO: Creating log watcher for controller kube-system/containerd-logger-t26tl, container containerd-logger
Jan 26 21:54:02.922: INFO: Error starting logs stream for pod kube-system/containerd-logger-t26tl, container containerd-logger: pods "win-p-win000002" not found
Jan 26 21:54:03.266: INFO: Describing Pod kube-system/coredns-57575c5f89-bxnch
Jan 26 21:54:03.266: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-bxnch, container coredns
Jan 26 21:54:03.660: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-zl7fj, container coredns
Jan 26 21:54:03.660: INFO: Describing Pod kube-system/coredns-57575c5f89-zl7fj
Jan 26 21:54:04.062: INFO: Describing Pod kube-system/csi-azuredisk-controller-545d478dbf-xb228
Jan 26 21:54:04.062: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-xb228, container csi-snapshotter
... skipping 7 lines ...
Jan 26 21:54:04.460: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-2gzkb, container azuredisk
Jan 26 21:54:04.460: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-2gzkb, container liveness-probe
Jan 26 21:54:04.861: INFO: Describing Pod kube-system/csi-azuredisk-node-s8v4c
Jan 26 21:54:04.861: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-s8v4c, container liveness-probe
Jan 26 21:54:04.861: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-s8v4c, container azuredisk
Jan 26 21:54:04.861: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-s8v4c, container node-driver-registrar
Jan 26 21:54:04.926: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-s8v4c, container node-driver-registrar: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:04.926: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-s8v4c, container azuredisk: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:04.926: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-s8v4c, container liveness-probe: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:05.261: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-win-b8mxw, container node-driver-registrar
Jan 26 21:54:05.261: INFO: Describing Pod kube-system/csi-azuredisk-node-win-b8mxw
Jan 26 21:54:05.261: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-win-b8mxw, container liveness-probe
Jan 26 21:54:05.261: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-win-b8mxw, container azuredisk
Jan 26 21:54:05.326: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-win-b8mxw, container node-driver-registrar: pods "win-p-win000002" not found
Jan 26 21:54:05.326: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-win-b8mxw, container liveness-probe: pods "win-p-win000002" not found
Jan 26 21:54:05.326: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-win-b8mxw, container azuredisk: pods "win-p-win000002" not found
Jan 26 21:54:05.659: INFO: Describing Pod kube-system/csi-proxy-mjvzn
Jan 26 21:54:05.658: INFO: Creating log watcher for controller kube-system/csi-proxy-mjvzn, container csi-proxy
Jan 26 21:54:05.721: INFO: Error starting logs stream for pod kube-system/csi-proxy-mjvzn, container csi-proxy: pods "win-p-win000002" not found
Jan 26 21:54:06.059: INFO: Describing Pod kube-system/etcd-machine-pool-pocagg-control-plane-ccnn4
Jan 26 21:54:06.059: INFO: Creating log watcher for controller kube-system/etcd-machine-pool-pocagg-control-plane-ccnn4, container etcd
Jan 26 21:54:06.458: INFO: Describing Pod kube-system/kube-apiserver-machine-pool-pocagg-control-plane-ccnn4
Jan 26 21:54:06.458: INFO: Creating log watcher for controller kube-system/kube-apiserver-machine-pool-pocagg-control-plane-ccnn4, container kube-apiserver
Jan 26 21:54:06.857: INFO: Describing Pod kube-system/kube-controller-manager-machine-pool-pocagg-control-plane-ccnn4
Jan 26 21:54:06.858: INFO: Creating log watcher for controller kube-system/kube-controller-manager-machine-pool-pocagg-control-plane-ccnn4, container kube-controller-manager
Jan 26 21:54:07.258: INFO: Describing Pod kube-system/kube-proxy-4dk7f
Jan 26 21:54:07.258: INFO: Creating log watcher for controller kube-system/kube-proxy-4dk7f, container kube-proxy
Jan 26 21:54:07.659: INFO: Describing Pod kube-system/kube-proxy-m8fh2
Jan 26 21:54:07.659: INFO: Creating log watcher for controller kube-system/kube-proxy-m8fh2, container kube-proxy
Jan 26 21:54:07.722: INFO: Error starting logs stream for pod kube-system/kube-proxy-m8fh2, container kube-proxy: pods "machine-pool-pocagg-mp-0000002" not found
Jan 26 21:54:08.059: INFO: Creating log watcher for controller kube-system/kube-proxy-windows-4dpdh, container kube-proxy
Jan 26 21:54:08.059: INFO: Describing Pod kube-system/kube-proxy-windows-4dpdh
Jan 26 21:54:08.122: INFO: Error starting logs stream for pod kube-system/kube-proxy-windows-4dpdh, container kube-proxy: pods "win-p-win000002" not found
Jan 26 21:54:08.460: INFO: Describing Pod kube-system/kube-scheduler-machine-pool-pocagg-control-plane-ccnn4
Jan 26 21:54:08.460: INFO: Creating log watcher for controller kube-system/kube-scheduler-machine-pool-pocagg-control-plane-ccnn4, container kube-scheduler
Jan 26 21:54:08.857: INFO: Fetching kube-system pod logs took 8.620597587s
Jan 26 21:54:08.857: INFO: Dumping workload cluster machine-pool-ra97cj/machine-pool-pocagg Azure activity log
Jan 26 21:54:08.857: INFO: Creating log watcher for controller tigera-operator/tigera-operator-65d6bf4d4f-8d6kf, container tigera-operator
Jan 26 21:54:08.858: INFO: Describing Pod tigera-operator/tigera-operator-65d6bf4d4f-8d6kf
... skipping 11 lines ...
<< Timeline
------------------------------
[SynchronizedAfterSuite] PASSED [0.000 seconds]
[SynchronizedAfterSuite]
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/e2e_suite_test.go:116
------------------------------
{"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:164","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Process did not finish before 4h0m0s timeout","severity":"error","time":"2023-01-27T01:13:03Z"}
++ early_exit_handler
++ '[' -n 162 ']'
++ kill -TERM 162
++ cleanup_dind
++ [[ true == \t\r\u\e ]]
++ echo 'Cleaning up after docker'
... skipping 154 lines ...
Jan 26 21:39:43.774: INFO: Collecting boot logs for AzureMachine quick-start-ieam7e-md-0-6jlch
Jan 26 21:39:44.470: INFO: Collecting logs for Windows node quick-sta-vw7hw in cluster quick-start-ieam7e in namespace quick-start-1pj9f7
[TIMEDOUT] in [AfterEach] - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109 @ 01/27/23 01:17:53.963
Jan 27 01:17:53.983: INFO: FAILED!
Jan 27 01:17:53.983: INFO: Cleaning up after "Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster" spec
STEP: Redacting sensitive information from logs @ 01/27/23 01:17:53.983
[TIMEDOUT] in [AfterEach] - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/27/23 01:18:23.984
<< Timeline
[TIMEDOUT] A suite timeout occurred
... skipping 16 lines ...
sync.(*WaitGroup).Wait(0x1461927?)
/usr/local/go/src/sync/waitgroup.go:139
sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc000bc2510, 0x6, 0x3ede0a7?})
/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc000784340, {0xc0038f6dc0, 0xf}, 0x1, {0xc000ab8060, 0x5e})
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:156
| var errors []error
| errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn)))
> errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn)))
| errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn)))
| errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn)))
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4305340, 0xc0000640b0}, {0x4315a38, 0xc00127e2a0}, 0xc00052cf10, {0xc000ab8060, 0x5e})
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74
... skipping 51 lines ...
| defer f.Close()
> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)
| })
| }
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41
| err := wait.PollImmediate(interval, timeout, func() (bool, error) {
| pollError = nil
> err := fn()
| if err != nil {
| pollError = err
k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x157a2f1, 0x0})
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222
... skipping 6 lines ...
k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4305340, 0xc000064098}, 0x1eb8025476?, 0xc000cf3ea8?, 0x1461927?)
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528
k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xf06d37eae3f72aae?, 0xc000cf3ee8?, 0x1461927?)
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x2f2ea88ab94e08fd?, 0xd0a2c6d191275a33?, 0xc0009d2690)
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39
| func retryWithTimeout(interval, timeout time.Duration, fn func() error) error {
| var pollError error
> err := wait.PollImmediate(interval, timeout, func() (bool, error) {
| pollError = nil
| err := fn()
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141
| execToPathFn := func(outputFileName, command string, args ...string) func() error {
| return func() error {
> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {
| f, err := fileOnHost(filepath.Join(outputPath, outputFileName))
| if err != nil {
sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()
/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51
sigs.k8s.io/kind/pkg/errors.AggregateConcurrent
/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49
... skipping 3 lines ...
[SynchronizedAfterSuite] PASSED [0.068 seconds]
[SynchronizedAfterSuite]
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/e2e_suite_test.go:116
Timeline >>
STEP: Tearing down the management cluster @ 01/27/23 01:18:24.005
INFO: Deleting the kind cluster "capz-e2e" failed. You may need to remove this by hand.
<< Timeline
------------------------------
[ReportAfterSuite] PASSED [0.019 seconds]
[ReportAfterSuite] Autogenerated ReportAfterSuite for --junit-report
autogenerated by Ginkgo
------------------------------
Summarizing 1 Failure:
[TIMEDOUT] Running the Cluster API E2E tests Running the quick-start spec [AfterEach] Should create a workload cluster
/home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/quick_start.go:109
Ran 8 of 26 Specs in 14263.133 seconds
FAIL! - Suite Timeout Elapsed -- 7 Passed | 1 Failed | 0 Pending | 18 Skipped
You're using deprecated Ginkgo functionality:
=============================================
CurrentGinkgoTestDescription() is deprecated in Ginkgo V2.
Use CurrentSpecReport() instead.
Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#changed-currentginkgotestdescription
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:423
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:278
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:281
To silence deprecations that can be silenced set the following environment variable:
ACK_GINKGO_DEPRECATIONS=2.6.0
--- FAIL: TestE2E (14263.12s)
FAIL
You're using deprecated Ginkgo functionality:
=============================================
CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.
Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#changed-currentginkgotestdescription
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:423
... skipping 90 lines ...
PASS
Ginkgo ran 1 suite in 4h1m23.585483114s
Test Suite Failed
make[1]: *** [Makefile:655: test-e2e-run] Error 1
make[1]: Leaving directory '/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure'
make: *** [Makefile:664: test-e2e] Error 2
{"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:251","func":"k8s.io/test-infra/prow/entrypoint.gracefullyTerminate","level":"error","msg":"Process gracefully exited before 15m0s grace period","severity":"error","time":"2023-01-27T01:19:17Z"}
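The timed-out goroutine shown earlier is parked in sync.WaitGroup.Wait because each Windows log-collection command keeps retrying against a node that no longer answers. The retry wrapper is partially quoted in the stack frames (test/e2e/retry.go:39 and :41); the sketch below reconstructs it from those frames, with the error handling after wait.PollImmediate returns filled in as an assumption rather than taken from the log.

```go
// Sketch of the retry wrapper visible in the stack frames above. The loop body
// is quoted in the trace; the final error handling is assumed, not confirmed.
package retrysketch

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func retryWithTimeout(interval, timeout time.Duration, fn func() error) error {
	var pollError error
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pollError = nil
		if err := fn(); err != nil {
			pollError = err
			return false, nil // swallow the failure and try again until timeout
		}
		return true, nil
	})
	if pollError != nil {
		return pollError // surface the last per-attempt error instead of "timed out"
	}
	return err
}
```

Under that shape, every failing remote command blocks for the full collectLogTimeout before giving up, and many such commands are fanned out per node inside the concurrent aggregation, which is consistent with the AfterEach log dump running long enough for the 4h0m0s entrypoint timeout to fire first.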