use PollUntilContextTimeout to replace PollImmediateWithContext in test

Signed-off-by: bzsuni <bingzhe.sun@daocloud.io>
Author: bzsuni
Date: 2023-10-19 22:50:21 +08:00
parent de054fbf94
commit 8775d805fa
55 changed files with 140 additions and 140 deletions
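
For context on the change applied throughout this commit: wait.PollUntilContextTimeout takes the same context, interval, timeout, and condition as wait.PollImmediateWithContext, plus an extra boolean that controls whether the condition is evaluated immediately, before the first interval elapses. Every call site below passes true, which preserves the old "immediate" behavior. The following sketch only illustrates the shape of the migration; the interval, timeout, and condition are placeholders, not values taken from this commit.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// A wait.ConditionWithContextFunc: return true once the desired state is reached.
	ready := func(ctx context.Context) (bool, error) {
		return true, nil
	}

	// Old (deprecated):
	//   err := wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, ready)
	// New equivalent; the fourth argument (immediate = true) runs the first check
	// right away instead of waiting for the first interval to pass.
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, ready)
	if err != nil {
		fmt.Printf("condition not met: %v\n", err)
	}
}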

@@ -495,7 +495,7 @@ func (rc *ResourceConsumer) GetHpa(ctx context.Context, name string) (*autoscali
// WaitForReplicas waits for the desired replicas
func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
- err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) {
replicas := rc.GetReplicas(ctx)
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
@@ -506,7 +506,7 @@ func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas
// EnsureDesiredReplicasInRange ensures the number of replicas is in a desired range
func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
interval := 10 * time.Second
- err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) {
replicas := rc.GetReplicas(ctx)
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
as, err := rc.GetHpa(ctx, hpaName)
@@ -964,7 +964,7 @@ func CreateCustomResourceDefinition(ctx context.Context, c crdclientset.Interfac
crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crdSchema, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait until just created CRD appears in discovery.
- err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) {
return ExistsInDiscovery(crd, c, "v1")
})
framework.ExpectNoError(err)

@@ -34,7 +34,7 @@ type Action func() error
// Please note delivery of events is not guaranteed. Asserting on events can lead to flaky tests.
func WaitTimeoutForEvent(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error {
interval := 2 * time.Second
- return wait.PollImmediateWithContext(ctx, interval, timeout, eventOccurred(c, namespace, eventSelector, msg))
+ return wait.PollUntilContextTimeout(ctx, interval, timeout, true, eventOccurred(c, namespace, eventSelector, msg))
}
func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionWithContextFunc {

@@ -196,7 +196,7 @@ func SimpleGET(ctx context.Context, c *http.Client, url, host string) (string, e
// expectUnreachable is true, it breaks on first non-healthy http code instead.
func PollURL(ctx context.Context, route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
var lastBody string
- pollErr := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
var err error
lastBody, err = SimpleGET(ctx, httpClient, route, host)
if err != nil {
@@ -733,7 +733,7 @@ func getIngressAddress(ctx context.Context, client clientset.Interface, ns, name
// WaitForIngressAddress waits for the Ingress to acquire an address.
func (j *TestJig) WaitForIngressAddress(ctx context.Context, c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
- err := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
ipOrNameList, err := getIngressAddress(ctx, c, ns, ingName, j.Class)
if err != nil || len(ipOrNameList) == 0 {
j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList)
@@ -889,7 +889,7 @@ func getPortURL(ctx context.Context, client clientset.Interface, ns, name string
// unschedulable, since control plane nodes don't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
- if wait.PollImmediateWithContext(ctx, poll, framework.SingleCallTimeout, func(ctx context.Context) (bool, error) {
+ if wait.PollUntilContextTimeout(ctx, poll, framework.SingleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})

@@ -91,7 +91,7 @@ func isJobFailed(j *batchv1.Job) bool {
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(ctx context.Context, c clientset.Interface, ns, jobName string) error {
- return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if err != nil {
return false, err
@@ -125,7 +125,7 @@ func WaitForJobGone(ctx context.Context, c clientset.Interface, ns, jobName stri
// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
// to be deleted.
func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobName string) error {
- return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
pods, err := GetJobPods(ctx, c, ns, jobName)
if err != nil {
return false, err

@@ -252,7 +252,7 @@ func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, erro
var lastMetricsFetchErr error
var output string
- if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+ if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@@ -303,7 +303,7 @@ func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerMana
var output string
var lastMetricsFetchErr error
- if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+ if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@@ -342,7 +342,7 @@ func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string
var output string
var lastMetricsFetchErr error
- if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+ if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {

@@ -526,7 +526,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd stri
const retryTimeout = 30 * time.Second
podName := config.HostTestContainerPod.Name
var msg string
- if pollErr := wait.PollImmediateWithContext(ctx, retryInterval, retryTimeout, func(ctx context.Context) (bool, error) {
+ if pollErr := wait.PollUntilContextTimeout(ctx, retryInterval, retryTimeout, true, func(ctx context.Context) (bool, error) {
stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
@@ -1172,7 +1172,7 @@ func UnblockNetwork(ctx context.Context, from string, to string) {
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
- err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
_, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
switch {
case err == nil:

@@ -121,7 +121,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
- err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})

@@ -36,7 +36,7 @@ func WaitForSSHTunnels(ctx context.Context, namespace string) {
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
- wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+ wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})

@@ -51,7 +51,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
var notReady []v1.Node
var missingPodsPerNode map[string][]string
- err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
@@ -192,7 +192,7 @@ func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout ti
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
- if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
+ if wait.PollUntilContextTimeout(ctx, poll, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})

@@ -251,7 +251,7 @@ func WaitForFirewallRule(ctx context.Context, gceCloud *gcecloud.Cloud, fwName s
return true, nil
}
- if err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, condition); err != nil {
+ if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, true, condition); err != nil {
return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist)
}
return fw, nil

@@ -297,7 +297,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
- err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) {
+ err := wait.PollUntilContextTimeout(ctx, 29*time.Second, timeouts.PVCreate, true, func(ctx context.Context) (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.

@@ -52,7 +52,7 @@ func WaitForReplicaSetTargetAvailableReplicas(ctx context.Context, c clientset.I
// with given timeout.
func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error {
desiredGeneration := replicaSet.Generation
- err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err

@@ -175,7 +175,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error {
var pods []*v1.Pod
- err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
if pods = ps.List(); len(pods) == 0 {
return true, nil
}
@@ -197,7 +197,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
// when the pod is inactive.
func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error {
var activePods []*v1.Pod
- err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
pods := ps.List()
activePods = e2epod.FilterActivePods(pods)
if len(activePods) != 0 {

@@ -328,7 +328,7 @@ func (j *TestJig) GetEndpointNodeNames(ctx context.Context) (sets.String, error)
// WaitForEndpointOnNode waits for a service endpoint on the given node.
func (j *TestJig) WaitForEndpointOnNode(ctx context.Context, nodeName string) error {
- return wait.PollImmediateWithContext(ctx, framework.Poll, KubeProxyLagTimeout, func(ctx context.Context) (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, framework.Poll, KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
@@ -627,7 +627,7 @@ func (j *TestJig) waitForCondition(ctx context.Context, timeout time.Duration, m
}
return false, nil
}
- if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollFunc); err != nil {
+ if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollFunc); err != nil {
return nil, fmt.Errorf("timed out waiting for service %q to %s: %w", j.Name, message, err)
}
return service, nil
@@ -910,7 +910,7 @@ func testEndpointReachability(ctx context.Context, endpoint string, port int32,
return fmt.Errorf("service reachability check is not supported for %v", protocol)
}
- err := wait.PollImmediateWithContext(ctx, 1*time.Second, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, 1*time.Second, ServiceReachabilityShortPollTimeout, true, func(ctx context.Context) (bool, error) {
stdout, err := e2epodoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
@@ -1006,7 +1006,7 @@ func (j *TestJig) checkExternalServiceReachability(ctx context.Context, svc *v1.
svcName := fmt.Sprintf("%s.%s.svc.%s", svc.Name, svc.Namespace, framework.TestContext.ClusterDNSDomain)
// Service must resolve to IP
cmd := fmt.Sprintf("nslookup %s", svcName)
- return wait.PollImmediateWithContext(ctx, framework.Poll, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (done bool, err error) {
+ return wait.PollUntilContextTimeout(ctx, framework.Poll, ServiceReachabilityShortPollTimeout, true, func(ctx context.Context) (done bool, err error) {
_, stderr, err := e2epodoutput.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
// NOTE(claudiub): nslookup may return 0 on Windows, even though the DNS name was not found. In this case,
// we can check stderr for the error.

@@ -44,7 +44,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
return false, nil // caller can retry
}
- if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
+ if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollfn); err != nil {
if wait.Interrupted(err) {
framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
} else {
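
An aside on the error handling visible in the hunk above: when wait.PollUntilContextTimeout gives up (timeout reached or context canceled), the returned error satisfies wait.Interrupted, which is how the test distinguishes "never became reachable before the timeout" from an error returned by the condition itself. A minimal illustration with a deliberately failing placeholder condition (not code from this commit):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	// A condition that never succeeds, to force a timeout.
	never := func(ctx context.Context) (bool, error) { return false, nil }

	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, time.Second, true, never)
	switch {
	case wait.Interrupted(err):
		fmt.Println("timed out waiting for the condition") // this branch is taken
	case err != nil:
		fmt.Println("condition returned an error:", err)
	}
}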

@@ -38,7 +38,7 @@ func WaitForServiceDeletedWithFinalizer(ctx context.Context, cs clientset.Interf
}
ginkgo.By("Wait for service to disappear")
- if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) {
+ if pollErr := wait.PollUntilContextTimeout(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), true, func(ctx context.Context) (bool, error) {
svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@@ -58,7 +58,7 @@ func WaitForServiceDeletedWithFinalizer(ctx context.Context, cs clientset.Interf
// don't have a finalizer.
func WaitForServiceUpdatedWithFinalizer(ctx context.Context, cs clientset.Interface, namespace, name string, hasFinalizer bool) {
ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer))
- if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) {
+ if pollErr := wait.PollUntilContextTimeout(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), true, func(ctx context.Context) (bool, error) {
svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err

@@ -422,7 +422,7 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
- if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
+ if wait.PollUntilContextTimeout(ctx, pollNodeInterval, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})

@@ -96,7 +96,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a statefulset
- pvcPollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+ pvcPollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
@@ -116,7 +116,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
}
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
pvList, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
@@ -151,7 +151,7 @@ func Scale(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, c
ss = update(ctx, c, ns, name, count)
var statefulPodList *v1.PodList
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
statefulPodList = GetPodList(ctx, c, ss)
if int32(len(statefulPodList.Items)) == count {
return true, nil

@@ -32,7 +32,7 @@ import (
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
// numPodsReady ordinals to be Ready.
func WaitForRunning(ctx context.Context, c clientset.Interface, numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
func(ctx context.Context) (bool, error) {
podList := GetPodList(ctx, c, ss)
SortStatefulPods(podList)
@@ -61,7 +61,7 @@ func WaitForRunning(ctx context.Context, c clientset.Interface, numPodsRunning,
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
func WaitForState(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
func(ctx context.Context) (bool, error) {
ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ctx, ss.Name, metav1.GetOptions{})
if err != nil {
@@ -101,7 +101,7 @@ func WaitForStatusReadyReplicas(ctx context.Context, c clientset.Interface, ss *
framework.Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
func(ctx context.Context) (bool, error) {
ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
@@ -126,7 +126,7 @@ func WaitForStatusAvailableReplicas(ctx context.Context, c clientset.Interface,
framework.Logf("Waiting for statefulset status.AvailableReplicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
func(ctx context.Context) (bool, error) {
ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
@@ -151,7 +151,7 @@ func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv
framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
- pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+ pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
func(ctx context.Context) (bool, error) {
ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {

@@ -352,7 +352,7 @@ func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
- if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, Poll, 30*time.Second, true, func(ctx context.Context) (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{})
if err != nil {

@@ -238,7 +238,7 @@ func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName stri
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
- waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
+ waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.