/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
|
|
|
|
package pod
|
|
|
|
import (
|
|
"bytes"
|
|
"errors"
|
|
"fmt"
|
|
"sync"
|
|
"text/tabwriter"
|
|
"time"
|
|
|
|
"github.com/onsi/ginkgo"
|
|
|
|
v1 "k8s.io/api/core/v1"
|
|
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
|
"k8s.io/apimachinery/pkg/util/wait"
|
|
clientset "k8s.io/client-go/kubernetes"
|
|
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
|
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
|
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
|
|
testutils "k8s.io/kubernetes/test/utils"
|
|
)
|
|
|
|
const (
	// defaultPodDeletionTimeout is the default timeout for deleting pod.
	defaultPodDeletionTimeout = 3 * time.Minute

	// podListTimeout is how long to wait for the pod to be listable.
	podListTimeout = time.Minute

	// podRespondingTimeout is how long to wait for pods to respond when dialed via proxy.
	podRespondingTimeout = 15 * time.Minute

	// podScheduledBeforeTimeout is how long pods have to become scheduled onto nodes.
	podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)

	// podStartTimeout is how long to wait for the pod to be started.
	// Initial pod start can be delayed O(minutes) by slow docker pulls.
	// TODO: Make this 30 seconds once #4566 is resolved.
	podStartTimeout = 5 * time.Minute

	// poll is how often to poll pods, nodes and claims.
	poll = 2 * time.Second
	// pollShortTimeout is a generic short poll deadline.
	// NOTE(review): does not appear to be referenced in this file — confirm before removing.
	pollShortTimeout = 1 * time.Minute
	// pollLongTimeout is a generic long poll deadline.
	// NOTE(review): does not appear to be referenced in this file — confirm before removing.
	pollLongTimeout = 5 * time.Minute

	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	singleCallTimeout = 5 * time.Minute

	// slowPodStartTimeout is a start timeout for pods that can take much longer
	// to become ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
)

// podCondition inspects a pod and reports whether the awaited state has been
// reached (done) plus any terminal error that should abort the wait early.
type podCondition func(pod *v1.Pod) (bool, error)
|
|
|
|
// errorBadPodsStates create error message of basic info of bad pods for debugging.
|
|
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
|
|
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
|
|
// Print bad pods info only if there are fewer than 10 bad pods
|
|
if len(badPods) > 10 {
|
|
return errStr + "There are too many bad pods. Please check log for details."
|
|
}
|
|
|
|
buf := bytes.NewBuffer(nil)
|
|
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
|
|
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
|
|
for _, badPod := range badPods {
|
|
grace := ""
|
|
if badPod.DeletionGracePeriodSeconds != nil {
|
|
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
|
|
}
|
|
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
|
|
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
|
|
fmt.Fprintln(w, podInfo)
|
|
}
|
|
w.Flush()
|
|
return errStr + buf.String()
|
|
}
|
|
|
|
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
|
|
// namespace ns are either running and ready, or failed but controlled by a
|
|
// controller. Also, it ensures that at least minPods are running and
|
|
// ready. It has separate behavior from other 'wait for' pods functions in
|
|
// that it requests the list of pods on every iteration. This is useful, for
|
|
// example, in cluster startup, because the number of pods increases while
|
|
// waiting. All pods that are in SUCCESS state are not counted.
|
|
//
|
|
// If ignoreLabels is not empty, pods matching this selector are ignored.
|
|
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
|
|
ignoreSelector := labels.SelectorFromSet(map[string]string{})
|
|
start := time.Now()
|
|
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
|
timeout, minPods, ns)
|
|
wg := sync.WaitGroup{}
|
|
wg.Add(1)
|
|
var ignoreNotReady bool
|
|
badPods := []v1.Pod{}
|
|
desiredPods := 0
|
|
notReady := int32(0)
|
|
|
|
if wait.PollImmediate(poll, timeout, func() (bool, error) {
|
|
// We get the new list of pods, replication controllers, and
|
|
// replica sets in every iteration because more pods come
|
|
// online during startup and we want to ensure they are also
|
|
// checked.
|
|
replicas, replicaOk := int32(0), int32(0)
|
|
|
|
rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
|
|
if err != nil {
|
|
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
|
|
if testutils.IsRetryableAPIError(err) {
|
|
return false, nil
|
|
}
|
|
return false, err
|
|
}
|
|
for _, rc := range rcList.Items {
|
|
replicas += *rc.Spec.Replicas
|
|
replicaOk += rc.Status.ReadyReplicas
|
|
}
|
|
|
|
rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{})
|
|
if err != nil {
|
|
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
|
|
if testutils.IsRetryableAPIError(err) {
|
|
return false, nil
|
|
}
|
|
return false, err
|
|
}
|
|
for _, rs := range rsList.Items {
|
|
replicas += *rs.Spec.Replicas
|
|
replicaOk += rs.Status.ReadyReplicas
|
|
}
|
|
|
|
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
|
if err != nil {
|
|
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
|
|
if testutils.IsRetryableAPIError(err) {
|
|
return false, nil
|
|
}
|
|
return false, err
|
|
}
|
|
nOk := int32(0)
|
|
notReady = int32(0)
|
|
badPods = []v1.Pod{}
|
|
desiredPods = len(podList.Items)
|
|
for _, pod := range podList.Items {
|
|
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
|
|
continue
|
|
}
|
|
res, err := testutils.PodRunningReady(&pod)
|
|
switch {
|
|
case res && err == nil:
|
|
nOk++
|
|
case pod.Status.Phase == v1.PodSucceeded:
|
|
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
|
|
// it doesn't make sense to wait for this pod
|
|
continue
|
|
case pod.Status.Phase != v1.PodFailed:
|
|
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
|
|
notReady++
|
|
badPods = append(badPods, pod)
|
|
default:
|
|
if metav1.GetControllerOf(&pod) == nil {
|
|
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
|
|
badPods = append(badPods, pod)
|
|
}
|
|
//ignore failed pods that are controlled by some controller
|
|
}
|
|
}
|
|
|
|
e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
|
|
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
|
|
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
|
|
|
|
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
|
|
return true, nil
|
|
}
|
|
ignoreNotReady = (notReady <= allowedNotReadyPods)
|
|
LogPodStates(badPods)
|
|
return false, nil
|
|
}) != nil {
|
|
if !ignoreNotReady {
|
|
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
|
|
}
|
|
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// WaitForPodCondition waits a pods to be matched to the given condition.
|
|
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
|
|
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
|
|
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
|
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
|
if err != nil {
|
|
if apierrs.IsNotFound(err) {
|
|
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
|
return err
|
|
}
|
|
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
|
|
continue
|
|
}
|
|
// log now so that current pod info is reported before calling `condition()`
|
|
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
|
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
|
|
if done, err := condition(pod); done {
|
|
if err == nil {
|
|
e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
|
|
}
|
|
return err
|
|
}
|
|
}
|
|
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
|
|
}
|
|
|
|
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
|
|
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
|
|
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
|
|
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
|
|
// the supplied reason.
|
|
func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
|
|
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
|
// Only consider Failed pods. Successful pods will be deleted and detected in
|
|
// waitForPodCondition's Get call returning `IsNotFound`
|
|
if pod.Status.Phase == v1.PodFailed {
|
|
if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
|
|
return true, nil
|
|
}
|
|
return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
|
|
}
|
|
return false, nil
|
|
})
|
|
}
|
|
|
|
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
|
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
|
|
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
|
|
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
|
|
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
|
|
}
|
|
switch pod.Status.Phase {
|
|
case v1.PodSucceeded:
|
|
ginkgo.By("Saw pod success")
|
|
return true, nil
|
|
case v1.PodFailed:
|
|
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
|
|
default:
|
|
return false, nil
|
|
}
|
|
})
|
|
}
|
|
|
|
// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
|
|
// and have condition Status equal to Unschedulable,
|
|
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
|
|
// Typically called to test that the passed-in pod is Pending and Unschedulable.
|
|
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
|
|
return WaitForPodCondition(c, namespace, podName, "Unschedulable", podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
|
// Only consider Failed pods. Successful pods will be deleted and detected in
|
|
// waitForPodCondition's Get call returning `IsNotFound`
|
|
if pod.Status.Phase == v1.PodPending {
|
|
for _, cond := range pod.Status.Conditions {
|
|
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
|
|
return true, nil
|
|
}
|
|
}
|
|
}
|
|
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
|
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
|
|
}
|
|
return false, nil
|
|
})
|
|
}
|
|
|
|
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
|
|
// waits and checks if all match pods are in the given podCondition
|
|
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
|
|
e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
|
|
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
|
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
conditionNotMatch := []string{}
|
|
for _, pod := range pods.Items {
|
|
done, err := condition(&pod)
|
|
if done && err != nil {
|
|
return fmt.Errorf("Unexpected error: %v", err)
|
|
}
|
|
if !done {
|
|
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
|
|
}
|
|
}
|
|
if len(conditionNotMatch) <= 0 {
|
|
return err
|
|
}
|
|
e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
|
|
}
|
|
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
|
|
}
|
|
|
|
// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
// Thin wrapper around WaitTimeoutForPodRunningInNamespace with podStartTimeout.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
	return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout)
}
|
|
|
|
// WaitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
// (The original comment mentioned a resourceVersion/Watch; this function takes
// neither — it simply polls with the longer slow-start timeout.)
func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
	return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
|
|
|
|
// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
// Polls every `poll` interval using the podRunning condition helper.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace))
}
|
|
|
|
// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
|
|
// Returns an error if timeout occurs first, or pod goes in to failed state.
|
|
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
|
|
if pod.Status.Phase == v1.PodRunning {
|
|
return nil
|
|
}
|
|
return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, podStartTimeout)
|
|
}
|
|
|
|
// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop.
// Polls every `poll` interval using the podCompleted condition helper.
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podCompleted(c, podName, namespace))
}
|
|
|
|
// WaitForPodNoLongerRunningInNamespace waits default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
// Thin wrapper around WaitTimeoutForPodNoLongerRunningInNamespace.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout)
}
|
|
|
|
// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
// Polls every `poll` interval using the podRunningAndReady condition helper.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
}
|
|
|
|
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
// Polls every `poll` interval for up to podStartTimeout using the podNotPending
// condition helper. (The original comment mentioned a resourceVersion/Watch;
// this function takes neither.)
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
	return wait.PollImmediate(poll, podStartTimeout, podNotPending(c, podName, ns))
}
|
|
|
|
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
// Thin wrapper around waitForPodSuccessInNamespaceTimeout with podStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}
|
|
|
|
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
// Thin wrapper around waitForPodSuccessInNamespaceTimeout with slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
|
|
|
|
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
|
|
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
|
|
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
|
|
// than "not found" then that error is returned and the wait stops.
|
|
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
|
|
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
|
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
|
if apierrs.IsNotFound(err) {
|
|
return true, nil // done
|
|
}
|
|
if err != nil {
|
|
return true, err // stop wait with error
|
|
}
|
|
return false, nil
|
|
})
|
|
}
|
|
|
|
// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
|
|
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
|
|
return wait.PollImmediate(interval, timeout, func() (bool, error) {
|
|
e2elog.Logf("Waiting for pod %s to disappear", podName)
|
|
options := metav1.ListOptions{LabelSelector: label.String()}
|
|
pods, err := c.CoreV1().Pods(ns).List(options)
|
|
if err != nil {
|
|
if testutils.IsRetryableAPIError(err) {
|
|
return false, nil
|
|
}
|
|
return false, err
|
|
}
|
|
found := false
|
|
for _, pod := range pods.Items {
|
|
if pod.Name == podName {
|
|
e2elog.Logf("Pod %s still exists", podName)
|
|
found = true
|
|
break
|
|
}
|
|
}
|
|
if !found {
|
|
e2elog.Logf("Pod %s no longer exists", podName)
|
|
return true, nil
|
|
}
|
|
return false, nil
|
|
})
|
|
}
|
|
|
|
// PodsResponding waits for the pods to response.
|
|
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
|
|
ginkgo.By("trying to dial each unique pod")
|
|
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
|
return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
|
|
}
|
|
|
|
// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running.
|
|
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
|
|
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
replicas, err := e2eresource.GetReplicasFromRuntimeObject(rtObject)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
|
|
if err != nil {
|
|
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// WaitForControlledPods waits up to podListTimeout for getting pods of the specified controller name and return them.
|
|
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
|
|
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return WaitForPodsWithLabel(c, ns, selector)
|
|
}
|
|
|
|
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
|
|
// matching pod exists. Return the list of matching pods.
|
|
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
|
|
err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
|
|
func() (bool, error) {
|
|
pods, err = WaitForPodsWithLabel(c, ns, label)
|
|
if err != nil {
|
|
return false, err
|
|
}
|
|
for _, pod := range pods.Items {
|
|
if pod.Spec.NodeName == "" {
|
|
return false, nil
|
|
}
|
|
}
|
|
return true, nil
|
|
})
|
|
return pods, err
|
|
}
|
|
|
|
// WaitForPodsWithLabel waits up to podListTimeout for getting pods with certain label
|
|
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
|
|
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
|
|
options := metav1.ListOptions{LabelSelector: label.String()}
|
|
pods, err = c.CoreV1().Pods(ns).List(options)
|
|
if err != nil {
|
|
if testutils.IsRetryableAPIError(err) {
|
|
continue
|
|
}
|
|
return
|
|
}
|
|
if len(pods.Items) > 0 {
|
|
break
|
|
}
|
|
}
|
|
if pods == nil || len(pods.Items) == 0 {
|
|
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
|
|
}
|
|
return
|
|
}
|
|
|
|
// WaitForPodsWithLabelRunningReady waits for exact amount of matching pods to become running and ready.
|
|
// Return the list of matching pods.
|
|
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
|
|
var current int
|
|
err = wait.Poll(poll, timeout,
|
|
func() (bool, error) {
|
|
pods, err = WaitForPodsWithLabel(c, ns, label)
|
|
if err != nil {
|
|
e2elog.Logf("Failed to list pods: %v", err)
|
|
if testutils.IsRetryableAPIError(err) {
|
|
return false, nil
|
|
}
|
|
return false, err
|
|
}
|
|
current = 0
|
|
for _, pod := range pods.Items {
|
|
if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
|
|
current++
|
|
}
|
|
}
|
|
if current != num {
|
|
e2elog.Logf("Got %v pods running and ready, expect: %v", current, num)
|
|
return false, nil
|
|
}
|
|
return true, nil
|
|
})
|
|
return pods, err
|
|
}
|
|
|
|
// WaitForPodsReady waits for the pods to become ready.
|
|
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
|
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
|
options := metav1.ListOptions{LabelSelector: label.String()}
|
|
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
|
|
pods, err := c.CoreV1().Pods(ns).List(options)
|
|
if err != nil {
|
|
return false, nil
|
|
}
|
|
for _, pod := range pods.Items {
|
|
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
|
|
return false, nil
|
|
}
|
|
}
|
|
return true, nil
|
|
})
|
|
}
|
|
|
|
// WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them,
|
|
// returning their names if it can do so before timeout.
|
|
func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
|
|
var pods []*v1.Pod
|
|
var errLast error
|
|
found := wait.Poll(poll, timeout, func() (bool, error) {
|
|
allPods := ps.List()
|
|
pods = FilterNonRestartablePods(allPods)
|
|
if len(pods) != expect {
|
|
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
|
|
e2elog.Logf("Error getting pods: %v", errLast)
|
|
return false, nil
|
|
}
|
|
return true, nil
|
|
}) == nil
|
|
podNames := make([]string, len(pods))
|
|
for i, p := range pods {
|
|
podNames[i] = p.ObjectMeta.Name
|
|
}
|
|
if !found {
|
|
return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
|
|
expect, timeout, errLast)
|
|
}
|
|
return podNames, nil
|
|
}
|