Result   | FAILURE
Tests    | 1 failed / 27 succeeded
Started  |
Elapsed  | 4h7m
Revision | release-1.7
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=capz\-e2e\s\[It\]\sRunning\sthe\sCluster\sAPI\sE2E\stests\sRunning\sthe\sMachineDeployment\srollout\sspec\sShould\ssuccessfully\supgrade\sMachines\supon\schanges\sin\srelevant\sMachineDeployment\sfields$'
[TIMEDOUT] A suite timeout occurred In [AfterEach] at: /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 @ 01/24/23 01:18:09.251 This is the Progress Report generated when the suite timeout occurred: Running the Cluster API E2E tests Running the MachineDeployment rollout spec Should successfully upgrade Machines upon changes in relevant MachineDeployment fields (Spec Runtime: 3h54m33.86s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:71 In [AfterEach] (Node Runtime: 3h39m30.807s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 At [By Step] Dumping logs from the "md-rollout-sr4aoe" workload cluster (Step Runtime: 3h39m30.807s) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 Spec Goroutine goroutine 55193 [semacquire, 214 minutes] sync.runtime_Semacquire(0xc0038662a0?) /usr/local/go/src/runtime/sema.go:62 sync.(*WaitGroup).Wait(0xc00113a201?) /usr/local/go/src/sync/waitgroup.go:139 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc002b8c140, 0x9, 0x3df405a?}) /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc0016ea340, {0xc002aafb80, 0xf}, 0x1, {0xc0020ba9c0, 0x5c}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:157 | errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn))) > errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn))) | errors = append(errors, sftpCopyFile(controlPlaneEndpoint, hostname, sshPort, "/c:/crashdumps.tar", filepath.Join(outputPath, "crashdumps.tar"))) > sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4211260, 0xc0000640b0}, {0x4221958, 0xc0005540e0}, 0xc0014f1920, {0xc0020ba9c0, 0x5c}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74 | hostname := getHostname(m, isAzureMachineWindows(am)) | > if err := collectLogsFromNode(cluster, hostname, isAzureMachineWindows(am), outputPath); err != nil { | errs = append(errs, err) | } sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).CollectWorkloadClusterLogs(0xc000fb2780, {0x4211260?, 0xc0000640b0}, {0xc0027fdf50, 0x11}, {0xc0027fdf38, 0x11}, {0xc003691140, 0x2a}) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_proxy.go:265 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.(*AzureClusterProxy).CollectWorkloadClusterLogs(0xc00256dc10, {0x4211260, 0xc0000640b0}, {0xc0027fdf50, 0x11}, {0xc0027fdf38, 0x11}, {0xc003691140, 0x2a}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_clusterproxy.go:93 | func (acp *AzureClusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) { | Logf("Dumping workload cluster %s/%s logs", namespace, name) > acp.ClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, name, outputPath) | | aboveMachinesPath := strings.Replace(outputPath, "/machines", "", 1) > sigs.k8s.io/cluster-api/test/e2e.dumpSpecResourcesAndCleanup({0x4211260, 0xc0000640b0}, {0x3d574d8, 0xa}, {0x42238d0, 0xc00256dc10}, {0xc000844a80, 0xf}, 0xc000dc7e40, 0xc0001ae600, ...) 
/home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:70 | | // Dump all the logs from the workload cluster before deleting them. > clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) | | Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) > sigs.k8s.io/cluster-api/test/e2e.MachineDeploymentRolloutSpec.func3() /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:105 | AfterEach(func() { | // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. > dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) | }) | } github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x1b817ce, 0xc00195b200}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 Goroutines of Interest goroutine 58420 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc001143440) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e14e3c?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x7?}, {0xc002aafb80?, 0x10000014185df?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00115e048}, {0x3e14e3c, 0x54}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0035b1e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0035b1e98?, 0xc0035b1ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc001068450?, 0xc0035b1ee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x14163f6?, 0x392f560?, 0xc000018310) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58423 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0011082d0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e06fa6?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x3472732d74756f6c?}, {0xc002aafb80?, 0x2d6c75327533622d?}, {0x3d4830c?, 0x544150203a746964?}, {0x41ef240, 0xc00115e030}, {0x3e06fa6, 0x47}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0019c0e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0019c0ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x145f172?, 0xc0019c0ee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x40?, 0x399ac00?, 0xc000018000) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58422 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719c20) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e14e90?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x4211260?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00115e098}, {0x3e14e90, 0x54}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc001643618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0016436b0?, 0xc0016436a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc0016436e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc002c29200?, 0x19d2449?, 0xc000018540) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58421 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027199e0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db16d0?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0xc00118fcf0?}, {0x3d4830c?, 0xc002b05560?}, {0x41ef240, 0xc00115e058}, {0x3db16d0, 0x29}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc00118fe18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc00118fe80?, 0xc00118fea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00118ffa8?, 0xc00118fee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x3?, 0x14163f6?, 0xc000018460) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58424 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719950) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dbc7da?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc0011443c0?}, {0xc002aafb80?, 0x7fc57c222c90?}, {0x3d4830c?, 0x7fc5a5a2f3c8?}, {0x41ef240, 0xc00115e0b0}, {0x3dbc7da, 0x2c}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc000de46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000de4782?, 0xc000de46e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc00162f188?, 0xc0028efdbc?, 0xc000018690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58425 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719b90) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d8f043?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc003016980?}, {0xc002aafb80?, 0x1000?}, {0x3d4830c?, 0x5b299a0?}, {0x41ef240, 0xc00115e0c0}, {0x3d8f043, 0x1e}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc000de56b0?, 0xc000de56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc000de56e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc0000bc840?, 0x19d220c?, 0xc000018850) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58426 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0011434d0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dae06b?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc001484010}, {0x3dae06b, 0x28}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000c20000}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0013d46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d46e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc0031ee000) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58427 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027198c0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db5074?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c040}, {0x3db5074, 0x2a}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d4ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d4ee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554fc0) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58428 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719a70) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d6e2a1?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c020}, {0x3d6e2a1, 0x13}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d56e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554d90) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 There were additional failures detected after the initial failure. These are visible in the timeline from junit.e2e_suite.1.xml
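
The stack above repeats one pattern: each log-collection goroutine sits in retryWithTimeout, which wraps wait.PollImmediate, and the condition it polls ultimately calls execOnHost, i.e. an SSH session.Run that never returns. wait.PollImmediate only checks its timeout between invocations of the condition, so a condition that blocks is never interrupted, and the surrounding kinderrors.AggregateConcurrent keeps waiting on its WaitGroup until the Ginkgo suite timeout fires. The standard-library sketch below (pollImmediate and collect are simplified stand-ins for illustration, not the real helpers) shows how a nominal 2s poll budget silently stretches to however long the blocking call takes:

package main

import (
    "fmt"
    "sync"
    "time"
)

// pollImmediate is a simplified stand-in for wait.PollImmediate: it runs cond
// immediately, then on every interval, and gives up after timeout. Crucially,
// the timeout is only consulted between calls to cond, never while cond runs.
func pollImmediate(interval, timeout time.Duration, cond func() (bool, error)) error {
    deadline := time.Now().Add(timeout)
    for {
        done, err := cond() // if cond blocks, we block here with it
        if err != nil {
            return err
        }
        if done {
            return nil
        }
        if time.Now().After(deadline) {
            return fmt.Errorf("timed out after %s", timeout)
        }
        time.Sleep(interval)
    }
}

// collect mimics the AggregateConcurrent pattern from the trace: run every fn
// in its own goroutine and wait for all of them before returning.
func collect(fns ...func() error) {
    var wg sync.WaitGroup
    for _, fn := range fns {
        wg.Add(1)
        go func(fn func() error) {
            defer wg.Done()
            _ = fn()
        }(fn)
    }
    wg.Wait() // blocks until the slowest (or stuck) fn returns
}

func main() {
    start := time.Now()
    collect(func() error {
        // Nominal budget: 2s. The "remote command" below ignores it and blocks
        // for 5s, so pollImmediate cannot return until it finishes.
        return pollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) {
            time.Sleep(5 * time.Second) // stand-in for an ssh session.Run that hangs
            return true, nil
        })
    })
    fmt.Printf("collect returned after %s (poll timeout was 2s)\n", time.Since(start).Round(time.Second))
}

Run as-is this prints that collect returned after roughly 5s even though the poll budget was 2s; in the real run the blocked call is an SSH command that never exits, so only the suite-level timeout ever fires.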
cluster.cluster.x-k8s.io/md-rollout-sr4aoe created azurecluster.infrastructure.cluster.x-k8s.io/md-rollout-sr4aoe created kubeadmcontrolplane.controlplane.cluster.x-k8s.io/md-rollout-sr4aoe-control-plane created azuremachinetemplate.infrastructure.cluster.x-k8s.io/md-rollout-sr4aoe-control-plane created machinedeployment.cluster.x-k8s.io/md-rollout-sr4aoe-md-0 created azuremachinetemplate.infrastructure.cluster.x-k8s.io/md-rollout-sr4aoe-md-0 created kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/md-rollout-sr4aoe-md-0 created machinedeployment.cluster.x-k8s.io/md-rollout-sr4aoe-md-win created azuremachinetemplate.infrastructure.cluster.x-k8s.io/md-rollout-sr4aoe-md-win created kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/md-rollout-sr4aoe-md-win created machinehealthcheck.cluster.x-k8s.io/md-rollout-sr4aoe-mhc-0 created clusterresourceset.addons.cluster.x-k8s.io/md-rollout-sr4aoe-calico-windows created azureclusteridentity.infrastructure.cluster.x-k8s.io/cluster-identity-sp created clusterresourceset.addons.cluster.x-k8s.io/csi-proxy created clusterresourceset.addons.cluster.x-k8s.io/containerd-logger-md-rollout-sr4aoe created configmap/cni-md-rollout-sr4aoe-calico-windows created configmap/csi-proxy-addon created configmap/containerd-logger-md-rollout-sr4aoe created felixconfiguration.crd.projectcalico.org/default configured Failed to get logs for Machine md-rollout-sr4aoe-md-win-65744c7b6d-ttf8v, Cluster md-rollout-qqqtvq/md-rollout-sr4aoe: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] Failed to get logs for Machine md-rollout-sr4aoe-md-win-899f44d55-mgcdp, Cluster md-rollout-qqqtvq/md-rollout-sr4aoe: azuremachines.infrastructure.cluster.x-k8s.io "md-rollout-sr4aoe-md-win-k8lwg" not found > Enter [BeforeEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:52 @ 01/23/23 21:23:35.391 INFO: "" started at Mon, 23 Jan 2023 21:23:35 UTC on Ginkgo node 1 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml < Exit [BeforeEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:52 @ 01/23/23 21:23:35.474 (83ms) > Enter [BeforeEach] Running the MachineDeployment rollout spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:56 @ 01/23/23 21:23:35.474 STEP: Creating a namespace for hosting the "md-rollout" test spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 @ 01/23/23 21:23:35.474 INFO: Creating namespace md-rollout-qqqtvq INFO: Creating event watcher for namespace "md-rollout-qqqtvq" < Exit [BeforeEach] Running the MachineDeployment rollout spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:56 @ 01/23/23 21:23:35.612 (138ms) > Enter [It] Should successfully upgrade Machines upon changes in relevant MachineDeployment fields - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:71 @ 01/23/23 21:23:35.612 STEP: Creating a workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:72 @ 01/23/23 21:23:35.612 INFO: Creating the workload cluster with name "md-rollout-sr4aoe" using the "(default)" template (Kubernetes v1.24.10, 1 control-plane machines, 1 
worker machines) INFO: Getting the cluster template yaml INFO: clusterctl config cluster md-rollout-sr4aoe --infrastructure (default) --kubernetes-version v1.24.10 --control-plane-machine-count 1 --worker-machine-count 1 --flavor (default) INFO: Applying the cluster template yaml to the cluster INFO: Waiting for the cluster infrastructure to be provisioned STEP: Waiting for cluster to enter the provisioned phase - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_helpers.go:134 @ 01/23/23 21:23:40.693 INFO: Waiting for control plane to be initialized STEP: Installing Calico CNI via helm - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:51 @ 01/23/23 21:25:40.817 STEP: Configuring calico CNI helm chart for IPv4 configuration - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:131 @ 01/23/23 21:25:40.817 Jan 23 21:28:28.390: INFO: getting history for release projectcalico Jan 23 21:28:28.501: INFO: Release projectcalico does not exist, installing it Jan 23 21:28:29.706: INFO: creating 1 resource(s) Jan 23 21:28:29.839: INFO: creating 1 resource(s) Jan 23 21:28:29.965: INFO: creating 1 resource(s) Jan 23 21:28:30.085: INFO: creating 1 resource(s) Jan 23 21:28:30.215: INFO: creating 1 resource(s) Jan 23 21:28:30.340: INFO: creating 1 resource(s) Jan 23 21:28:30.623: INFO: creating 1 resource(s) Jan 23 21:28:30.861: INFO: creating 1 resource(s) Jan 23 21:28:30.975: INFO: creating 1 resource(s) Jan 23 21:28:31.103: INFO: creating 1 resource(s) Jan 23 21:28:31.237: INFO: creating 1 resource(s) Jan 23 21:28:31.354: INFO: creating 1 resource(s) Jan 23 21:28:31.473: INFO: creating 1 resource(s) Jan 23 21:28:31.602: INFO: creating 1 resource(s) Jan 23 21:28:31.724: INFO: creating 1 resource(s) Jan 23 21:28:31.866: INFO: creating 1 resource(s) Jan 23 21:28:32.038: INFO: creating 1 resource(s) Jan 23 21:28:32.160: INFO: creating 1 resource(s) Jan 23 21:28:32.357: INFO: creating 1 resource(s) Jan 23 21:28:32.558: INFO: creating 1 resource(s) Jan 23 21:28:33.175: INFO: creating 1 resource(s) Jan 23 21:28:33.302: INFO: Clearing discovery cache Jan 23 21:28:33.302: INFO: beginning wait for 21 resources with timeout of 1m0s Jan 23 21:28:38.805: INFO: creating 1 resource(s) Jan 23 21:28:39.628: INFO: creating 6 resource(s) Jan 23 21:28:40.935: INFO: Install complete STEP: Waiting for Ready tigera-operator deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:60 @ 01/23/23 21:28:41.714 STEP: waiting for deployment tigera-operator/tigera-operator to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:28:42.139 Jan 23 21:28:42.139: INFO: starting to wait for deployment to become available Jan 23 21:28:52.346: INFO: Deployment tigera-operator/tigera-operator is now available, took 10.207273226s STEP: Waiting for Ready calico-system deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:74 @ 01/23/23 21:28:53.527 STEP: waiting for deployment calico-system/calico-kube-controllers to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:28:54.05 Jan 23 21:28:54.050: INFO: starting to wait for deployment to become available Jan 23 21:29:45.256: INFO: Deployment calico-system/calico-kube-controllers is now available, took 51.205605046s STEP: waiting for deployment calico-system/calico-typha to be available - 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:29:45.815 Jan 23 21:29:45.815: INFO: starting to wait for deployment to become available Jan 23 21:29:45.955: INFO: Deployment calico-system/calico-typha is now available, took 139.226991ms STEP: Waiting for Ready calico-apiserver deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:79 @ 01/23/23 21:29:45.955 STEP: waiting for deployment calico-apiserver/calico-apiserver to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:29:46.819 Jan 23 21:29:46.819: INFO: starting to wait for deployment to become available Jan 23 21:30:07.136: INFO: Deployment calico-apiserver/calico-apiserver is now available, took 20.316948101s STEP: Waiting for Ready calico-node daemonset pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:84 @ 01/23/23 21:30:07.136 STEP: waiting for daemonset calico-system/calico-node to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:30:07.67 Jan 23 21:30:07.670: INFO: waiting for daemonset calico-system/calico-node to be complete Jan 23 21:30:07.776: INFO: 1 daemonset calico-system/calico-node pods are running, took 106.672792ms STEP: Waiting for Ready calico windows pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:91 @ 01/23/23 21:30:07.777 STEP: waiting for daemonset calico-system/calico-node-windows to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:30:08.299 Jan 23 21:30:08.299: INFO: waiting for daemonset calico-system/calico-node-windows to be complete Jan 23 21:30:08.402: INFO: 0 daemonset calico-system/calico-node-windows pods are running, took 103.906921ms STEP: Waiting for Ready calico windows pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cni.go:97 @ 01/23/23 21:30:08.403 STEP: waiting for daemonset kube-system/kube-proxy-windows to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:30:08.83 Jan 23 21:30:08.830: INFO: waiting for daemonset kube-system/kube-proxy-windows to be complete Jan 23 21:30:08.934: INFO: 0 daemonset kube-system/kube-proxy-windows pods are running, took 103.550418ms INFO: Waiting for the first control plane machine managed by md-rollout-qqqtvq/md-rollout-sr4aoe-control-plane to be provisioned STEP: Waiting for one control plane node to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:133 @ 01/23/23 21:30:08.981 STEP: Installing azure-disk CSI driver components via helm - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:71 @ 01/23/23 21:30:09.002 Jan 23 21:30:09.135: INFO: getting history for release azuredisk-csi-driver-oot Jan 23 21:30:09.241: INFO: Release azuredisk-csi-driver-oot does not exist, installing it Jan 23 21:30:13.777: INFO: creating 1 resource(s) Jan 23 21:30:14.223: INFO: creating 18 resource(s) Jan 23 21:30:15.127: INFO: Install complete STEP: Waiting for Ready csi-azuredisk-controller deployment pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:81 @ 01/23/23 21:30:15.159 STEP: waiting for deployment kube-system/csi-azuredisk-controller to be available - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 
21:30:15.615 Jan 23 21:30:15.615: INFO: starting to wait for deployment to become available Jan 23 21:30:56.419: INFO: Deployment kube-system/csi-azuredisk-controller is now available, took 40.803932803s STEP: Waiting for Running azure-disk-csi node pods - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/cloud-provider-azure.go:86 @ 01/23/23 21:30:56.419 STEP: waiting for daemonset kube-system/csi-azuredisk-node to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:30:56.946 Jan 23 21:30:56.946: INFO: waiting for daemonset kube-system/csi-azuredisk-node to be complete Jan 23 21:30:57.050: INFO: 1 daemonset kube-system/csi-azuredisk-node pods are running, took 104.197262ms STEP: waiting for daemonset kube-system/csi-azuredisk-node-win to be complete - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:88 @ 01/23/23 21:30:57.572 Jan 23 21:30:57.572: INFO: waiting for daemonset kube-system/csi-azuredisk-node-win to be complete Jan 23 21:30:57.675: INFO: 0 daemonset kube-system/csi-azuredisk-node-win pods are running, took 103.470434ms INFO: Waiting for control plane to be ready INFO: Waiting for control plane md-rollout-qqqtvq/md-rollout-sr4aoe-control-plane to be ready (implies underlying nodes to be ready as well) STEP: Waiting for the control plane to be ready - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:165 @ 01/23/23 21:30:57.696 STEP: Checking all the control plane machines are in the expected failure domains - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/controlplane_helpers.go:196 @ 01/23/23 21:30:57.704 INFO: Waiting for the machine deployments to be provisioned STEP: Waiting for the workload nodes to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/machinedeployment_helpers.go:102 @ 01/23/23 21:30:57.748 STEP: Checking all the machines controlled by md-rollout-sr4aoe-md-0 are in the "<None>" failure domain - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/ginkgoextensions/output.go:35 @ 01/23/23 21:31:47.872 STEP: Waiting for the workload nodes to exist - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/machinedeployment_helpers.go:102 @ 01/23/23 21:31:47.894 STEP: Checking all the machines controlled by md-rollout-sr4aoe-md-win are in the "<None>" failure domain - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/ginkgoextensions/output.go:35 @ 01/23/23 21:32:58.06 INFO: Waiting for the machine pools to be provisioned STEP: Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:93 @ 01/23/23 21:32:58.107 INFO: Patching the new infrastructure ref to Machine Deployment md-rollout-qqqtvq/md-rollout-sr4aoe-md-0 INFO: Waiting for rolling upgrade to start. INFO: Waiting for MachineDeployment rolling upgrade to start INFO: Waiting for rolling upgrade to complete. INFO: Waiting for MachineDeployment rolling upgrade to complete INFO: Patching the new infrastructure ref to Machine Deployment md-rollout-qqqtvq/md-rollout-sr4aoe-md-win INFO: Waiting for rolling upgrade to start. INFO: Waiting for MachineDeployment rolling upgrade to start INFO: Waiting for rolling upgrade to complete. INFO: Waiting for MachineDeployment rolling upgrade to complete STEP: PASSED! 
- /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:100 @ 01/23/23 21:38:38.443 < Exit [It] Should successfully upgrade Machines upon changes in relevant MachineDeployment fields - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:71 @ 01/23/23 21:38:38.443 (15m2.831s) > Enter [AfterEach] Running the MachineDeployment rollout spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 @ 01/23/23 21:38:38.444 STEP: Dumping logs from the "md-rollout-sr4aoe" workload cluster - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:51 @ 01/23/23 21:38:38.444 Jan 23 21:38:38.444: INFO: Dumping workload cluster md-rollout-qqqtvq/md-rollout-sr4aoe logs Jan 23 21:38:38.491: INFO: Collecting logs for Linux node md-rollout-sr4aoe-control-plane-xfhpl in cluster md-rollout-sr4aoe in namespace md-rollout-qqqtvq Jan 23 21:38:57.629: INFO: Collecting boot logs for AzureMachine md-rollout-sr4aoe-control-plane-xfhpl Jan 23 21:38:59.299: INFO: Collecting logs for Linux node md-rollout-sr4aoe-md-0-b3u2ul-g8qfw in cluster md-rollout-sr4aoe in namespace md-rollout-qqqtvq Jan 23 21:39:13.622: INFO: Collecting boot logs for AzureMachine md-rollout-sr4aoe-md-0-b3u2ul-g8qfw Jan 23 21:39:14.437: INFO: Collecting logs for Windows node md-rollou-fvw4l in cluster md-rollout-sr4aoe in namespace md-rollout-qqqtvq Jan 23 21:41:49.343: INFO: Attempting to copy file /c:/crashdumps.tar on node md-rollou-fvw4l to /logs/artifacts/clusters/md-rollout-sr4aoe/machines/md-rollout-sr4aoe-md-win-65744c7b6d-ttf8v/crashdumps.tar Jan 23 21:41:52.886: INFO: Collecting boot logs for AzureMachine md-rollout-sr4aoe-md-win-7m015t-fvw4l Jan 23 21:41:54.327: INFO: Collecting logs for Windows node md-rollou-tf2vj in cluster md-rollout-sr4aoe in namespace md-rollout-qqqtvq [TIMEDOUT] A suite timeout occurred In [AfterEach] at: /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 @ 01/24/23 01:18:09.251
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc002c29200?, 0x19d2449?, 0xc000018540) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58421 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027199e0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db16d0?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0xc00118fcf0?}, {0x3d4830c?, 0xc002b05560?}, {0x41ef240, 0xc00115e058}, {0x3db16d0, 0x29}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc00118fe18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc00118fe80?, 0xc00118fea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00118ffa8?, 0xc00118fee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x3?, 0x14163f6?, 0xc000018460) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58424 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719950) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dbc7da?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc0011443c0?}, {0xc002aafb80?, 0x7fc57c222c90?}, {0x3d4830c?, 0x7fc5a5a2f3c8?}, {0x41ef240, 0xc00115e0b0}, {0x3dbc7da, 0x2c}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc000de46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000de4782?, 0xc000de46e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc00162f188?, 0xc0028efdbc?, 0xc000018690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58425 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719b90) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d8f043?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc003016980?}, {0xc002aafb80?, 0x1000?}, {0x3d4830c?, 0x5b299a0?}, {0x41ef240, 0xc00115e0c0}, {0x3d8f043, 0x1e}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc000de56b0?, 0xc000de56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc000de56e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc0000bc840?, 0x19d220c?, 0xc000018850) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58426 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0011434d0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dae06b?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc001484010}, {0x3dae06b, 0x28}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000c20000}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0013d46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d46e8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc0031ee000) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58427 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027198c0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db5074?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c040}, {0x3db5074, 0x2a}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d4ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d4ee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554fc0) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58428 [chan receive, 214 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719a70) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d6e2a1?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c020}, {0x3d6e2a1, 0x13}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d56e8?, 0x1418927?) 
    /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554d90)
    /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39
    | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error {
    | var pollError error
    > err := wait.PollImmediate(interval, timeout, func() (bool, error) {
    | pollError = nil
    | err := fn()
> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()
    /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141
    | execToPathFn := func(outputFileName, command string, args ...string) func() error {
    | return func() error {
    > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {
    | f, err := fileOnHost(filepath.Join(outputPath, outputFileName))
    | if err != nil {
sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()
    /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51
sigs.k8s.io/kind/pkg/errors.AggregateConcurrent
    /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49
< Exit [AfterEach] Running the MachineDeployment rollout spec - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 @ 01/24/23 01:18:09.308 (3h39m30.865s)
> Enter [AfterEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/24/23 01:18:09.308
Jan 24 01:18:09.308: INFO: FAILED!
Jan 24 01:18:09.309: INFO: Cleaning up after "Running the Cluster API E2E tests Running the MachineDeployment rollout spec Should successfully upgrade Machines upon changes in relevant MachineDeployment fields" spec
STEP: Redacting sensitive information from logs - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:212 @ 01/24/23 01:18:09.309
[TIMEDOUT] A grace period timeout occurred
In [AfterEach] at: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/24/23 01:18:39.309
This is the Progress Report generated when the grace period timeout occurred:
  Running the Cluster API E2E tests Running the MachineDeployment rollout spec Should successfully upgrade Machines upon changes in relevant MachineDeployment fields (Spec Runtime: 3h55m3.918s)
    /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:71
  In [AfterEach] (Node Runtime: 30.001s)
    /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97
  At [By Step] Redacting sensitive information from logs (Step Runtime: 30s)
    /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:212
Spec Goroutine
goroutine 55193 [semacquire, 215 minutes]
  sync.runtime_Semacquire(0xc0038662a0?)
    /usr/local/go/src/runtime/sema.go:62
  sync.(*WaitGroup).Wait(0xc00113a201?)
/usr/local/go/src/sync/waitgroup.go:139 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent({0xc002b8c140, 0x9, 0x3df405a?}) /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:54 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode(0xc0016ea340, {0xc002aafb80, 0xf}, 0x1, {0xc0020ba9c0, 0x5c}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:157 | errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn))) > errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn))) | errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn))) | errors = append(errors, sftpCopyFile(controlPlaneEndpoint, hostname, sshPort, "/c:/crashdumps.tar", filepath.Join(outputPath, "crashdumps.tar"))) > sigs.k8s.io/cluster-api-provider-azure/test/e2e.AzureLogCollector.CollectMachineLog({}, {0x4211260, 0xc0000640b0}, {0x4221958, 0xc0005540e0}, 0xc0014f1920, {0xc0020ba9c0, 0x5c}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:74 | hostname := getHostname(m, isAzureMachineWindows(am)) | > if err := collectLogsFromNode(cluster, hostname, isAzureMachineWindows(am), outputPath); err != nil { | errs = append(errs, err) | } sigs.k8s.io/cluster-api/test/framework.(*clusterProxy).CollectWorkloadClusterLogs(0xc000fb2780, {0x4211260?, 0xc0000640b0}, {0xc0027fdf50, 0x11}, {0xc0027fdf38, 0x11}, {0xc003691140, 0x2a}) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/framework/cluster_proxy.go:265 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.(*AzureClusterProxy).CollectWorkloadClusterLogs(0xc00256dc10, {0x4211260, 0xc0000640b0}, {0xc0027fdf50, 0x11}, {0xc0027fdf38, 0x11}, {0xc003691140, 0x2a}) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_clusterproxy.go:93 | func (acp *AzureClusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) { | Logf("Dumping workload cluster %s/%s logs", namespace, name) > acp.ClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, name, outputPath) | | aboveMachinesPath := strings.Replace(outputPath, "/machines", "", 1) > sigs.k8s.io/cluster-api/test/e2e.dumpSpecResourcesAndCleanup({0x4211260, 0xc0000640b0}, {0x3d574d8, 0xa}, {0x42238d0, 0xc00256dc10}, {0xc000844a80, 0xf}, 0xc000dc7e40, 0xc0001ae600, ...) /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/common.go:70 | | // Dump all the logs from the workload cluster before deleting them. > clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) | | Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) > sigs.k8s.io/cluster-api/test/e2e.MachineDeploymentRolloutSpec.func3() /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:105 | AfterEach(func() { | // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
> dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) | }) | } github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x1b817ce, 0xc00195b200}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 Goroutines of Interest goroutine 58420 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc001143440) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e14e3c?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x7?}, {0xc002aafb80?, 0x10000014185df?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00115e048}, {0x3e14e3c, 0x54}, ...) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0035b1e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0035b1e98?, 0xc0035b1ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc001068450?, 0xc0035b1ee8?, 0x1418927?) 
/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x14163f6?, 0x392f560?, 0xc000018310) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 69294 [syscall] syscall.Syscall6(0x1?, 0x1?, 0xffffffffffffffff?, 0x3708860?, 0x24?, 0x0?, 0x8?) /usr/local/go/src/syscall/syscall_linux.go:90 os.(*Process).blockUntilWaitable(0xc000e79ce0) /usr/local/go/src/os/wait_waitid.go:32 os.(*Process).wait(0xc000e79ce0) /usr/local/go/src/os/exec_unix.go:22 os.(*Process).Wait(...) /usr/local/go/src/os/exec.go:132 os/exec.(*Cmd).Wait(0xc000f31080) /usr/local/go/src/os/exec/exec.go:599 os/exec.(*Cmd).Run(0xc0015093b0?) /usr/local/go/src/os/exec/exec.go:437 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.redactLogs() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:216 | //nolint:gosec // Ignore warning about running a command constructed from user input | cmd := exec.Command(e2eConfig.GetVariable(RedactLogScriptPath)) > if err := cmd.Run(); err != nil { | LogWarningf("Redact logs command failed: %v", err) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.glob..func2.2() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:99 | AfterEach(func() { | CheckTestBeforeCleanup() > redactLogs() | | Expect(os.Unsetenv(AzureResourceGroup)).To(Succeed()) github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc000555110, 0x0}) /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/node.go:445 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:847 github.com/onsi/ginkgo/v2/internal.(*Suite).runNode /home/prow/go/pkg/mod/github.com/onsi/ginkgo/v2@v2.6.0/internal/suite.go:834 goroutine 58423 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0011082d0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e06fa6?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x3472732d74756f6c?}, {0xc002aafb80?, 0x2d6c75327533622d?}, {0x3d4830c?, 0x544150203a746964?}, {0x41ef240, 0xc00115e030}, {0x3e06fa6, 0x47}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0019c0e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0019c0ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x145f172?, 0xc0019c0ee8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x40?, 0x399ac00?, 0xc000018000) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58422 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719c20) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3e14e90?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x4211260?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00115e098}, {0x3e14e90, 0x54}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc001643618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0016436b0?, 0xc0016436a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc0016436e8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc002c29200?, 0x19d2449?, 0xc000018540) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58421 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027199e0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db16d0?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0xc00118fcf0?}, {0x3d4830c?, 0xc002b05560?}, {0x41ef240, 0xc00115e058}, {0x3db16d0, 0x29}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc00118fe18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc00118fe80?, 0xc00118fea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00118ffa8?, 0xc00118fee8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x3?, 0x14163f6?, 0xc000018460) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58424 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719950) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dbc7da?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc0011443c0?}, {0xc002aafb80?, 0x7fc57c222c90?}, {0x3d4830c?, 0x7fc5a5a2f3c8?}, {0x41ef240, 0xc00115e0b0}, {0x3dbc7da, 0x2c}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc000de46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000de4782?, 0xc000de46e8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc00162f188?, 0xc0028efdbc?, 0xc000018690) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58425 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719b90) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d8f043?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0xc003016980?}, {0xc002aafb80?, 0x1000?}, {0x3d4830c?, 0x5b299a0?}, {0x41ef240, 0xc00115e0c0}, {0x3d8f043, 0x1e}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc000de5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc000de56b0?, 0xc000de56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc000de56e8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc0000bc840?, 0x19d220c?, 0xc000018850) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58426 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0011434d0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3dae06b?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc001484010}, {0x3dae06b, 0x28}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000c20000}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0013d46a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d46e8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc0031ee000) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58427 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc0027198c0) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3db5074?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c040}, {0x3db5074, 0x2a}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d4e18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d4ea8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d4ee8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554fc0) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 goroutine 58428 [chan receive, 215 minutes] golang.org/x/crypto/ssh.(*Session).Wait(0xc002719a70) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:403 golang.org/x/crypto/ssh.(*Session).Run(0xc00113a280?, {0x3d6e2a1?, 0xc002aafb80?}) /home/prow/go/pkg/mod/golang.org/x/crypto@v0.3.0/ssh/session.go:314 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.execOnHost({0xc00113a280?, 0x0?}, {0xc002aafb80?, 0x0?}, {0x3d4830c?, 0x0?}, {0x41ef240, 0xc00425c020}, {0x3d6e2a1, 0x13}, ...) 
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:510 | command += " " + strings.Join(args, " ") | } > if err = session.Run(command); err != nil { | return errors.Wrapf(err, "running command \"%s\"", command) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:147 | } | defer f.Close() > return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...) | }) | } > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41 | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil > err := fn() | if err != nil { | pollError = err k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00}) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222 k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x4211260?, 0xc000064098?}, 0xc0013d5618?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:235 k8s.io/apimachinery/pkg/util/wait.poll({0x4211260, 0xc000064098}, 0x58?, 0x229cb25?, 0x18?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:582 k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d56a8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528 k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d56e8?, 0x1418927?) /home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514 > sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554d90) /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39 | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error > err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil | err := fn() > sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1() /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141 | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { > return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error { | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { sigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1() /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51 sigs.k8s.io/kind/pkg/errors.AggregateConcurrent /home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49 < Exit [AfterEach] Running the Cluster API E2E tests - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 @ 01/24/23 01:18:39.376 (30.067s)
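The goroutine dumps above show where the suite spent its final hours: each Windows log-collection goroutine is parked inside golang.org/x/crypto/ssh (*Session).Run / (*Session).Wait ("chan receive, 215 minutes"), and the surrounding retryWithTimeout cannot help, because wait.PollImmediate only checks its timeout between invocations of the condition function; a single invocation that never returns is never interrupted. Below is a minimal sketch of one way to bound the remote command itself, assuming an already-dialed *ssh.Session. runWithDeadline is a hypothetical helper for illustration only, not the repository's execOnHost.

package sketch

import (
	"fmt"
	"time"

	"golang.org/x/crypto/ssh"
)

// runWithDeadline starts cmd on an established SSH session and force-closes
// the session if the command has not finished within timeout, so a wedged
// remote command cannot park the collector goroutine indefinitely.
func runWithDeadline(session *ssh.Session, cmd string, timeout time.Duration) error {
	if err := session.Start(cmd); err != nil {
		return fmt.Errorf("starting command %q: %w", cmd, err)
	}

	done := make(chan error, 1)
	go func() { done <- session.Wait() }()

	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		// Closing the session unblocks Wait; whatever output was captured
		// before the deadline is still written to the session's stdout writer.
		_ = session.Close()
		return fmt.Errorf("command %q did not finish within %s", cmd, timeout)
	}
}

Whether closing the session is enough to unwedge the particular Windows commands seen here (the Get-Content C:\cni.log read, the crash-dump tar) would need verification against the real collector; the point of the sketch is only that the deadline has to wrap Run/Wait itself rather than the retry loop around it.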
capz-e2e [It] Running the Cluster API E2E tests Running the quick-start spec Should create a workload cluster
capz-e2e [It] Running the Cluster API E2E tests Running the self-hosted spec Should pivot the bootstrap cluster to a self-hosted cluster
capz-e2e [It] Running the Cluster API E2E tests Should successfully exercise machine pools Should successfully create a cluster with machine pool machines
capz-e2e [It] Running the Cluster API E2E tests Should successfully remediate unhealthy machines with MachineHealthCheck Should successfully trigger KCP remediation
capz-e2e [It] Running the Cluster API E2E tests Should successfully remediate unhealthy machines with MachineHealthCheck Should successfully trigger machine deployment remediation
capz-e2e [It] Running the Cluster API E2E tests Should successfully scale out and scale in a MachineDeployment Should successfully scale a MachineDeployment up and down upon changes to the MachineDeployment replica count
capz-e2e [It] Running the Cluster API E2E tests Should successfully set and use node drain timeout A node should be forcefully removed if it cannot be drained in time
capz-e2e [SynchronizedAfterSuite]
capz-e2e [SynchronizedBeforeSuite]
capz-e2e [It] Conformance Tests conformance-tests
capz-e2e [It] Running the Cluster API E2E tests API Version Upgrade upgrade from v1alpha4 to v1beta1, and scale workload clusters created in v1alpha4 Should create a management cluster and then upgrade all the providers
capz-e2e [It] Running the Cluster API E2E tests Running KCP upgrade in a HA cluster [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Running the Cluster API E2E tests Running KCP upgrade in a HA cluster using scale in rollout [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Running the Cluster API E2E tests Running the workload cluster upgrade spec [K8s-Upgrade] Should create and upgrade a workload cluster and eventually run kubetest
capz-e2e [It] Workload cluster creation Creating a GPU-enabled cluster [OPTIONAL] with a single control plane node and 1 node
capz-e2e [It] Workload cluster creation Creating a VMSS cluster [REQUIRED] with a single control plane node and an AzureMachinePool with 2 Linux and 2 Windows worker nodes
capz-e2e [It] Workload cluster creation Creating a cluster that uses the external cloud provider and external azurediskcsi driver [OPTIONAL] with a 1 control plane nodes and 2 worker nodes
capz-e2e [It] Workload cluster creation Creating a cluster that uses the external cloud provider and machinepools [OPTIONAL] with 1 control plane node and 1 machinepool
capz-e2e [It] Workload cluster creation Creating a dual-stack cluster [OPTIONAL] With dual-stack worker node
capz-e2e [It] Workload cluster creation Creating a highly available cluster [REQUIRED] With 3 control-plane nodes and 2 Linux and 2 Windows worker nodes
capz-e2e [It] Workload cluster creation Creating a ipv6 control-plane cluster [REQUIRED] With ipv6 worker node
capz-e2e [It] Workload cluster creation Creating a private cluster [OPTIONAL] Creates a public management cluster in a custom vnet
capz-e2e [It] Workload cluster creation Creating an AKS cluster [EXPERIMENTAL][Managed Kubernetes] with a single control plane node and 1 node
capz-e2e [It] Workload cluster creation Creating clusters using clusterclass [OPTIONAL] with a single control plane node, one linux worker node, and one windows worker node
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=external CCM=external AzureDiskCSIMigration=true: upgrade to v1.23 should create volumes dynamically with out-of-tree cloud provider
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=external CCM=internal AzureDiskCSIMigration=true: upgrade to v1.23 should create volumes dynamically with intree cloud provider
capz-e2e [It] [K8s-Upgrade] Running the CSI migration tests [CSI Migration] Running CSI migration test CSI=internal CCM=internal AzureDiskCSIMigration=false: upgrade to v1.23 should create volumes dynamically with intree cloud provider
... skipping 785 lines ... Jan 23 21:35:03.349: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-t7l8d, container csi-resizer Jan 23 21:35:03.354: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-t7l8d, container liveness-probe Jan 23 21:35:03.490: INFO: Fetching kube-system pod logs took 1.321170718s Jan 23 21:35:03.490: INFO: Dumping workload cluster mhc-remediation-wo9zty/mhc-remediation-qjta5d Azure activity log Jan 23 21:35:03.490: INFO: Creating log watcher for controller tigera-operator/tigera-operator-65d6bf4d4f-bc2v5, container tigera-operator Jan 23 21:35:03.491: INFO: Collecting events for Pod tigera-operator/tigera-operator-65d6bf4d4f-bc2v5 Jan 23 21:35:03.950: INFO: Error starting logs stream for pod calico-system/csi-node-driver-tm8b5, container csi-node-driver-registrar: container "csi-node-driver-registrar" in pod "csi-node-driver-tm8b5" is waiting to start: ContainerCreating Jan 23 21:35:03.952: INFO: Error starting logs stream for pod calico-system/csi-node-driver-tm8b5, container calico-csi: container "calico-csi" in pod "csi-node-driver-tm8b5" is waiting to start: ContainerCreating Jan 23 21:35:03.953: INFO: Error starting logs stream for pod calico-system/calico-node-q6f74, container calico-node: container "calico-node" in pod "calico-node-q6f74" is waiting to start: PodInitializing Jan 23 21:35:05.105: INFO: Fetching activity logs took 1.615155844s [1mSTEP:[0m Dumping all the Cluster API resources in the "mhc-remediation-wo9zty" namespace [38;5;243m@ 01/23/23 21:35:05.105[0m [1mSTEP:[0m Deleting cluster mhc-remediation-wo9zty/mhc-remediation-qjta5d [38;5;243m@ 01/23/23 21:35:05.459[0m [1mSTEP:[0m Deleting cluster mhc-remediation-qjta5d [38;5;243m@ 01/23/23 21:35:05.481[0m INFO: Waiting for the Cluster mhc-remediation-wo9zty/mhc-remediation-qjta5d to be deleted [1mSTEP:[0m Waiting for cluster mhc-remediation-qjta5d to be deleted [38;5;243m@ 01/23/23 21:35:05.491[0m ... skipping 10 lines ... [38;5;243m------------------------------[0m [38;5;10m• [1166.959 seconds][0m [0mRunning the Cluster API E2E tests [38;5;243mRunning the self-hosted spec [38;5;10m[1mShould pivot the bootstrap cluster to a self-hosted cluster[0m [38;5;243m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_selfhosted.go:108[0m [38;5;243mCaptured StdOut/StdErr Output >>[0m 2023/01/23 21:23:35 failed trying to get namespace (self-hosted):namespaces "self-hosted" not found kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/self-hosted-ekj3bq-md-0 created cluster.cluster.x-k8s.io/self-hosted-ekj3bq created machinedeployment.cluster.x-k8s.io/self-hosted-ekj3bq-md-0 created kubeadmcontrolplane.controlplane.cluster.x-k8s.io/self-hosted-ekj3bq-control-plane created azurecluster.infrastructure.cluster.x-k8s.io/self-hosted-ekj3bq created azureclusteridentity.infrastructure.cluster.x-k8s.io/cluster-identity-sp created ... skipping 246 lines ... 
configmap/cni-quick-start-z181gi-calico-windows created configmap/csi-proxy-addon created configmap/containerd-logger-quick-start-z181gi created felixconfiguration.crd.projectcalico.org/default configured Failed to get logs for Machine quick-start-z181gi-md-win-6c7cd5b9b4-b9vwk, Cluster quick-start-wfseml/quick-start-z181gi: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] Failed to get logs for Machine quick-start-z181gi-md-win-6c7cd5b9b4-qxzj9, Cluster quick-start-wfseml/quick-start-z181gi: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] [38;5;243m<< Captured StdOut/StdErr Output[0m [38;5;243mTimeline >>[0m INFO: "" started at Mon, 23 Jan 2023 21:23:35 UTC on Ginkgo node 5 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml [1mSTEP:[0m Creating a namespace for hosting the "quick-start" test spec [38;5;243m@ 01/23/23 21:23:35.555[0m INFO: Creating namespace quick-start-wfseml ... skipping 601 lines ... Jan 23 21:42:36.217: INFO: Collecting events for Pod calico-system/csi-node-driver-cz854 Jan 23 21:42:36.217: INFO: Creating log watcher for controller calico-system/csi-node-driver-cz854, container calico-csi Jan 23 21:42:36.217: INFO: Creating log watcher for controller calico-system/csi-node-driver-cz854, container csi-node-driver-registrar Jan 23 21:42:36.218: INFO: Creating log watcher for controller calico-system/csi-node-driver-jjpqt, container calico-csi Jan 23 21:42:36.218: INFO: Creating log watcher for controller calico-system/csi-node-driver-jjpqt, container csi-node-driver-registrar Jan 23 21:42:36.218: INFO: Collecting events for Pod calico-system/csi-node-driver-jjpqt Jan 23 21:42:36.328: INFO: Error starting logs stream for pod calico-system/csi-node-driver-jjpqt, container calico-csi: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.329: INFO: Error starting logs stream for pod calico-system/csi-node-driver-jjpqt, container csi-node-driver-registrar: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.350: INFO: Error starting logs stream for pod calico-system/calico-node-mj2bp, container calico-node: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.357: INFO: Collecting events for Pod kube-system/coredns-57575c5f89-pmbzw Jan 23 21:42:36.357: INFO: Collecting events for Pod kube-system/csi-azuredisk-controller-545d478dbf-qp6zg Jan 23 21:42:36.357: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-qp6zg, container csi-attacher Jan 23 21:42:36.357: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-qp6zg, container csi-snapshotter Jan 23 21:42:36.358: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-trr6h, container coredns Jan 23 21:42:36.358: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-2m77d, container liveness-probe ... skipping 27 lines ... 
Jan 23 21:42:36.362: INFO: Collecting events for Pod kube-system/kube-apiserver-node-drain-a74zzf-control-plane-5djpp Jan 23 21:42:36.362: INFO: Collecting events for Pod kube-system/kube-proxy-g62ff Jan 23 21:42:36.362: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-2m77d, container azuredisk Jan 23 21:42:36.362: INFO: Collecting events for Pod kube-system/etcd-node-drain-a74zzf-control-plane-rqqnw Jan 23 21:42:36.362: INFO: Collecting events for Pod kube-system/csi-azuredisk-node-2m77d Jan 23 21:42:36.362: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-f4q8w, container azuredisk Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-f4q8w, container azuredisk: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/kube-apiserver-node-drain-a74zzf-control-plane-rqqnw, container kube-apiserver: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/kube-scheduler-node-drain-a74zzf-control-plane-rqqnw, container kube-scheduler: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/etcd-node-drain-a74zzf-control-plane-rqqnw, container etcd: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/kube-controller-manager-node-drain-a74zzf-control-plane-rqqnw, container kube-controller-manager: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.625: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-f4q8w, container liveness-probe: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.626: INFO: Error starting logs stream for pod kube-system/kube-proxy-g62ff, container kube-proxy: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.626: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-f4q8w, container node-driver-registrar: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.628: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-pc2xl Jan 23 21:42:36.628: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-bhz4t Jan 23 21:42:36.628: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-qkjp9 Jan 23 21:42:36.628: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-pc2xl, container web Jan 23 21:42:36.628: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-qkjp9, container web Jan 23 21:42:36.628: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-bhz4t, container web ... skipping 4 lines ... 
Jan 23 21:42:36.630: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-qcqtk, container web Jan 23 21:42:36.630: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-75dpc, container web Jan 23 21:42:36.630: INFO: Creating log watcher for controller node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-wxpgm, container web Jan 23 21:42:36.630: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-qq4v7 Jan 23 21:42:36.630: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-qcqtk Jan 23 21:42:36.630: INFO: Collecting events for Pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-nz6-8598949b8b-wxpgm Jan 23 21:42:36.801: INFO: Error starting logs stream for pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-pc2xl, container web: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.801: INFO: Error starting logs stream for pod node-drain-ygsf2f-unevictable-workload/unevictable-pod-5nc-6f8c44cbdd-qkjp9, container web: pods "node-drain-a74zzf-control-plane-rqqnw" not found Jan 23 21:42:36.802: INFO: Fetching kube-system pod logs took 1.716540772s Jan 23 21:42:36.802: INFO: Dumping workload cluster node-drain-ygsf2f/node-drain-a74zzf Azure activity log Jan 23 21:42:36.802: INFO: Creating log watcher for controller tigera-operator/tigera-operator-65d6bf4d4f-ngxlh, container tigera-operator Jan 23 21:42:36.802: INFO: Collecting events for Pod tigera-operator/tigera-operator-65d6bf4d4f-ngxlh Jan 23 21:42:40.425: INFO: Fetching activity logs took 3.623247072s [1mSTEP:[0m Dumping all the Cluster API resources in the "node-drain-ygsf2f" namespace [38;5;243m@ 01/23/23 21:42:40.425[0m ... skipping 174 lines ... 
Jan 23 21:45:50.912: INFO: Creating log watcher for controller calico-system/csi-node-driver-vndbp, container csi-node-driver-registrar Jan 23 21:45:50.913: INFO: Collecting events for Pod calico-system/calico-node-f8wcf Jan 23 21:45:50.913: INFO: Collecting events for Pod calico-system/csi-node-driver-vndbp Jan 23 21:45:50.913: INFO: Collecting events for Pod calico-system/calico-node-prt2p Jan 23 21:45:50.913: INFO: Creating log watcher for controller calico-system/calico-typha-58db488465-pt9mv, container calico-typha Jan 23 21:45:50.914: INFO: Collecting events for Pod calico-system/calico-typha-58db488465-pt9mv Jan 23 21:45:51.054: INFO: Error starting logs stream for pod calico-system/csi-node-driver-vndbp, container csi-node-driver-registrar: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.054: INFO: Error starting logs stream for pod calico-system/csi-node-driver-vndbp, container calico-csi: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.054: INFO: Error starting logs stream for pod calico-system/calico-node-prt2p, container calico-node: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.071: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-8pnt5, container coredns Jan 23 21:45:51.071: INFO: Collecting events for Pod kube-system/coredns-57575c5f89-kjnf2 Jan 23 21:45:51.071: INFO: Creating log watcher for controller kube-system/coredns-57575c5f89-kjnf2, container coredns Jan 23 21:45:51.071: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-58698, container liveness-probe Jan 23 21:45:51.071: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-559nq, container liveness-probe Jan 23 21:45:51.072: INFO: Creating log watcher for controller kube-system/csi-azuredisk-node-559nq, container azuredisk ... skipping 21 lines ... 
Jan 23 21:45:51.077: INFO: Collecting events for Pod kube-system/csi-azuredisk-node-58698 Jan 23 21:45:51.076: INFO: Collecting events for Pod kube-system/kube-proxy-gwtqd Jan 23 21:45:51.072: INFO: Creating log watcher for controller kube-system/csi-azuredisk-controller-545d478dbf-scg25, container csi-resizer Jan 23 21:45:51.077: INFO: Collecting events for Pod kube-system/coredns-57575c5f89-8pnt5 Jan 23 21:45:51.354: INFO: Fetching kube-system pod logs took 1.607768705s Jan 23 21:45:51.354: INFO: Dumping workload cluster machine-pool-69ykk4/machine-pool-a7uukx Azure activity log Jan 23 21:45:51.354: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-58698, container node-driver-registrar: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.354: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-58698, container azuredisk: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.354: INFO: Creating log watcher for controller tigera-operator/tigera-operator-65d6bf4d4f-tzrmk, container tigera-operator Jan 23 21:45:51.354: INFO: Error starting logs stream for pod kube-system/csi-azuredisk-node-58698, container liveness-probe: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.354: INFO: Error starting logs stream for pod kube-system/kube-proxy-6vsx8, container kube-proxy: pods "machine-pool-a7uukx-mp-0000002" not found Jan 23 21:45:51.354: INFO: Collecting events for Pod tigera-operator/tigera-operator-65d6bf4d4f-tzrmk Jan 23 21:45:53.078: INFO: Fetching activity logs took 1.724456476s [1mSTEP:[0m Dumping all the Cluster API resources in the "machine-pool-69ykk4" namespace [38;5;243m@ 01/23/23 21:45:53.078[0m [1mSTEP:[0m Deleting cluster machine-pool-69ykk4/machine-pool-a7uukx [38;5;243m@ 01/23/23 21:45:53.611[0m [1mSTEP:[0m Deleting cluster machine-pool-a7uukx [38;5;243m@ 01/23/23 21:45:53.64[0m INFO: Waiting for the Cluster machine-pool-69ykk4/machine-pool-a7uukx to be deleted ... skipping 32 lines ... configmap/cni-md-scale-bn5jkj-calico-windows created configmap/csi-proxy-addon created configmap/containerd-logger-md-scale-bn5jkj created felixconfiguration.crd.projectcalico.org/default configured Failed to get logs for Machine md-scale-bn5jkj-md-win-85868bcd5b-j5hvw, Cluster md-scale-xwv14q/md-scale-bn5jkj: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] Failed to get logs for Machine md-scale-bn5jkj-md-win-85868bcd5b-kvch7, Cluster md-scale-xwv14q/md-scale-bn5jkj: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] [38;5;243m<< Captured StdOut/StdErr Output[0m [38;5;243mTimeline >>[0m INFO: "" started at Mon, 23 Jan 2023 21:23:35 UTC on Ginkgo node 3 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml [1mSTEP:[0m Creating a namespace for hosting the "md-scale" test spec [38;5;243m@ 01/23/23 21:23:35.522[0m INFO: Creating namespace md-scale-xwv14q ... skipping 215 lines ... 
[38;5;243m<< Timeline[0m [38;5;243m------------------------------[0m [38;5;10m[SynchronizedAfterSuite] PASSED [0.000 seconds][0m [38;5;10m[1m[SynchronizedAfterSuite] [0m [38;5;243m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/e2e_suite_test.go:116[0m [38;5;243m------------------------------[0m {"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:164","func":"k8s.io/test-infra/prow/entrypoint.Options.ExecuteProcess","level":"error","msg":"Process did not finish before 4h0m0s timeout","severity":"error","time":"2023-01-24T01:12:09Z"} ++ early_exit_handler ++ '[' -n 158 ']' ++ kill -TERM 158 ++ cleanup_dind ++ [[ true == \t\r\u\e ]] ++ echo 'Cleaning up after docker' ... skipping 39 lines ... configmap/cni-md-rollout-sr4aoe-calico-windows created configmap/csi-proxy-addon created configmap/containerd-logger-md-rollout-sr4aoe created felixconfiguration.crd.projectcalico.org/default configured Failed to get logs for Machine md-rollout-sr4aoe-md-win-65744c7b6d-ttf8v, Cluster md-rollout-qqqtvq/md-rollout-sr4aoe: [running command "Get-Content "C:\\cni.log"": Process exited with status 1, running command "$p = 'c:\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\crashdumps.tar $p *>&1 | %{ Write-Output "$_"} } else { Write-Host "No crash dumps found at $p" }": Process exited with status 1] Failed to get logs for Machine md-rollout-sr4aoe-md-win-899f44d55-mgcdp, Cluster md-rollout-qqqtvq/md-rollout-sr4aoe: azuremachines.infrastructure.cluster.x-k8s.io "md-rollout-sr4aoe-md-win-k8lwg" not found [38;5;243m<< Captured StdOut/StdErr Output[0m [38;5;243mTimeline >>[0m INFO: "" started at Mon, 23 Jan 2023 21:23:35 UTC on Ginkgo node 1 of 10 and junit test report to file /logs/artifacts/test_e2e_junit.e2e_suite.1.xml [1mSTEP:[0m Creating a namespace for hosting the "md-rollout" test spec [38;5;243m@ 01/23/23 21:23:35.474[0m INFO: Creating namespace md-rollout-qqqtvq ... skipping 119 lines ... Jan 23 21:41:49.343: INFO: Attempting to copy file /c:/crashdumps.tar on node md-rollou-fvw4l to /logs/artifacts/clusters/md-rollout-sr4aoe/machines/md-rollout-sr4aoe-md-win-65744c7b6d-ttf8v/crashdumps.tar Jan 23 21:41:52.886: INFO: Collecting boot logs for AzureMachine md-rollout-sr4aoe-md-win-7m015t-fvw4l Jan 23 21:41:54.327: INFO: Collecting logs for Windows node md-rollou-tf2vj in cluster md-rollout-sr4aoe in namespace md-rollout-qqqtvq [38;5;214m[TIMEDOUT][0m in [AfterEach] - /home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103 [38;5;243m@ 01/24/23 01:18:09.251[0m Jan 24 01:18:09.308: INFO: FAILED! Jan 24 01:18:09.309: INFO: Cleaning up after "Running the Cluster API E2E tests Running the MachineDeployment rollout spec Should successfully upgrade Machines upon changes in relevant MachineDeployment fields" spec [1mSTEP:[0m Redacting sensitive information from logs [38;5;243m@ 01/24/23 01:18:09.309[0m [38;5;214m[TIMEDOUT][0m in [AfterEach] - /home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/capi_test.go:97 [38;5;243m@ 01/24/23 01:18:39.309[0m [38;5;243m<< Timeline[0m [38;5;214m[TIMEDOUT] A suite timeout occurred[0m ... skipping 80 lines ... 
| defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0035b1e98?, 0xc0035b1ea8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc001068450?, 0xc0035b1ee8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x14163f6?, 0x392f560?, 0xc000018310)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0019c0ea8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x145f172?, 0xc0019c0ee8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x40?, 0x399ac00?, 0xc000018000)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc0016436b0?, 0xc0016436a8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc0016436e8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc002c29200?, 0x19d2449?, 0xc000018540)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc00118fe80?, 0xc00118fea8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc00118ffa8?, 0xc00118fee8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x3?, 0x14163f6?, 0xc000018460)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc000de46a8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000de4782?, 0xc000de46e8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc00162f188?, 0xc0028efdbc?, 0xc000018690)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc0004ce400})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0xc000de56b0?, 0xc000de56a8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x1410d51?, 0xc000de56e8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0xc0000bc840?, 0x19d220c?, 0xc000018850)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000c20000})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x10000000000?, 0xc0013d46a8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d46e8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc0031ee000)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d4ea8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d4ee8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554fc0)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 16 lines ... | defer f.Close() [1m[38;5;214m> return execOnHost(controlPlaneEndpoint, hostname, sshPort, f, command, args...)[0m | }) | } [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout.func1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:41[0m | err := wait.PollImmediate(interval, timeout, func() (bool, error) { | pollError = nil [1m[38;5;214m> err := fn()[0m | if err != nil { | pollError = err [38;5;243mk8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x18, 0xc000096c00})[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:222[0m ... skipping 4 lines ... 
[38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x4211260, 0xc000064098}, 0x0?, 0xc0013d56a8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:528[0m [38;5;243mk8s.io/apimachinery/pkg/util/wait.PollImmediate(0x0?, 0xc0013d56e8?, 0x1418927?)[0m [38;5;243m/home/prow/go/pkg/mod/k8s.io/apimachinery@v0.25.4/pkg/util/wait/wait.go:514[0m [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.retryWithTimeout(0x0?, 0x0?, 0xc000554d90)[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/retry.go:39[0m | func retryWithTimeout(interval, timeout time.Duration, fn func() error) error { | var pollError error [1m[38;5;214m> err := wait.PollImmediate(interval, timeout, func() (bool, error) {[0m | pollError = nil | err := fn() [38;5;214m[1m> sigs.k8s.io/cluster-api-provider-azure/test/e2e.collectLogsFromNode.func1.1()[0m [38;5;214m[1m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/azure_logcollector.go:141[0m | execToPathFn := func(outputFileName, command string, args ...string) func() error { | return func() error { [1m[38;5;214m> return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {[0m | f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) | if err != nil { [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent.func1()[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:51[0m [38;5;243msigs.k8s.io/kind/pkg/errors.AggregateConcurrent[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/kind@v0.17.0/pkg/errors/concurrent.go:49[0m ... skipping 3 lines ... [38;5;10m[SynchronizedAfterSuite] PASSED [0.068 seconds][0m [38;5;10m[1m[SynchronizedAfterSuite] [0m [38;5;243m/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/e2e_suite_test.go:116[0m [38;5;243mTimeline >>[0m [1mSTEP:[0m Tearing down the management cluster [38;5;243m@ 01/24/23 01:18:39.385[0m INFO: Deleting the kind cluster "capz-e2e" failed. You may need to remove this by hand. [38;5;243m<< Timeline[0m [38;5;243m------------------------------[0m [38;5;10m[ReportAfterSuite] PASSED [0.022 seconds][0m [38;5;10m[1m[ReportAfterSuite] Autogenerated ReportAfterSuite for --junit-report[0m [38;5;243mautogenerated by Ginkgo[0m [38;5;243m------------------------------[0m [38;5;9m[1mSummarizing 1 Failure:[0m [38;5;214m[TIMEDOUT][0m [0mRunning the Cluster API E2E tests [38;5;214m[1mRunning the MachineDeployment rollout spec [AfterEach] [0mShould successfully upgrade Machines upon changes in relevant MachineDeployment fields[0m [38;5;243m/home/prow/go/pkg/mod/sigs.k8s.io/cluster-api/test@v1.3.1/e2e/md_rollout.go:103[0m [38;5;9m[1mRan 8 of 26 Specs in 14267.887 seconds[0m [38;5;9m[1mFAIL! - Suite Timeout Elapsed[0m -- [38;5;10m[1m7 Passed[0m | [38;5;9m[1m1 Failed[0m | [38;5;11m[1m0 Pending[0m | [38;5;14m[1m18 Skipped[0m [38;5;228mYou're using deprecated Ginkgo functionality:[0m [38;5;228m=============================================[0m [38;5;11mCurrentGinkgoTestDescription() is deprecated in Ginkgo V2. 
Use CurrentSpecReport() instead. Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#changed-currentginkgotestdescription
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:423
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:278
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/common.go:281
To silence deprecations that can be silenced set the following environment variable:
ACK_GINKGO_DEPRECATIONS=2.6.0
--- FAIL: TestE2E (14267.89s)
FAIL
You're using deprecated Ginkgo functionality:
=============================================
CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead. Learn more at: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#changed-currentginkgotestdescription
/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/helpers.go:423
... skipping 90 lines ...
PASS
Ginkgo ran 1 suite in 4h1m21.751165307s
Test Suite Failed
make[1]: *** [Makefile:655: test-e2e-run] Error 1
make[1]: Leaving directory '/home/prow/go/src/sigs.k8s.io/cluster-api-provider-azure'
make: *** [Makefile:664: test-e2e] Error 2
{"component":"entrypoint","file":"k8s.io/test-infra/prow/entrypoint/run.go:251","func":"k8s.io/test-infra/prow/entrypoint.gracefullyTerminate","level":"error","msg":"Process gracefully exited before 15m0s grace period","severity":"error","time":"2023-01-24T01:19:30Z"}
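The deprecation notice above points at test/e2e/helpers.go:423 and test/e2e/common.go:278 and :281, which still call CurrentGinkgoTestDescription(). In Ginkgo v2 the same information comes from CurrentSpecReport(). A minimal sketch of the swap, assuming the call sites only need the spec's full name (the exact fields used in those files are not visible in this log):

package sketch

import (
	ginkgo "github.com/onsi/ginkgo/v2"
)

// specName shows the v2 replacement for the deprecated call:
//
//	CurrentGinkgoTestDescription().FullTestText   (Ginkgo v1 / deprecated shim)
//
// becomes
//
//	CurrentSpecReport().FullText()                (Ginkgo v2)
func specName() string {
	report := ginkgo.CurrentSpecReport()
	return report.FullText()
}

Setting ACK_GINKGO_DEPRECATIONS=2.6.0, as the notice suggests, only silences the warning; replacing the deprecated call is what removes it.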