Refactored pod-related functions from framework/util.go
This is a refactoring of framework/utils.go into framework/pod. Signed-off-by: Jorge Alarcon Ochoa <alarcj137@gmail.com>
Commit 4969a05327 (parent b3981a2f9a), committed by alejandrox1.
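
The user-facing effect of the move is that call sites switch from the old unexported helpers in framework/util.go to the exported e2epod package. A minimal sketch of a post-refactor call site, assuming the import path shown in the diff below (the function and timeout here are illustrative, not taken from the commit):

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitThenGone waits for a pod to reach Running and later to be fully
// deleted, using the exported helpers introduced by this commit.
func waitThenGone(c clientset.Interface, ns, podName string) error {
	if err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns); err != nil {
		return err
	}
	// Illustrative timeout; pick what the test actually needs.
	return e2epod.WaitForPodNotFoundInNamespace(c, podName, ns, 2*time.Minute)
}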
@@ -49,9 +49,7 @@ go_library(
         "//pkg/kubelet/dockershim/metrics:go_default_library",
         "//pkg/kubelet/events:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
-        "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",
-        "//pkg/kubelet/util/format:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/registry/core/service/portallocator:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
@@ -111,6 +109,7 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/manifest:go_default_library",
@@ -152,6 +151,7 @@ filegroup(
         "//test/e2e/framework/lifecycle:all-srcs",
         "//test/e2e/framework/log:all-srcs",
         "//test/e2e/framework/metrics:all-srcs",
+        "//test/e2e/framework/pod:all-srcs",
         "//test/e2e/framework/podlogs:all-srcs",
         "//test/e2e/framework/providers/aws:all-srcs",
         "//test/e2e/framework/providers/azure:all-srcs",

@@ -48,6 +48,7 @@ import (
 	scaleclient "k8s.io/client-go/scale"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"

 	"github.com/onsi/ginkgo"
@@ -431,34 +432,34 @@ func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {

 // WaitForPodTerminated waits for the pod to be terminated with the given reason.
 func (f *Framework) WaitForPodTerminated(podName, reason string) error {
-	return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
+	return e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
 }

 // WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
 func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
-	return waitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
+	return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
 }

 // WaitForPodRunning waits for the pod to run in the namespace.
 func (f *Framework) WaitForPodRunning(podName string) error {
-	return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
+	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 }

 // WaitForPodReady waits for the pod to flip to ready in the namespace.
 func (f *Framework) WaitForPodReady(podName string) error {
-	return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
+	return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
 }

 // WaitForPodRunningSlow waits for the pod to run in the namespace.
 // It has a longer timeout than WaitForPodRunning (util.slowPodStartTimeout).
 func (f *Framework) WaitForPodRunningSlow(podName string) error {
-	return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
+	return e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
 }

 // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
 // success or failure.
 func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
-	return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
+	return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 }

 // TestContainerOutput runs the given pod in the given namespace and waits
@@ -39,6 +39,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -525,7 +526,7 @@ func (config *NetworkingTestConfig) DeleteNodePortService() {

 func (config *NetworkingTestConfig) createTestPods() {
 	testContainerPod := config.createTestPodSpec()
-	hostTestContainerPod := NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork)
+	hostTestContainerPod := e2epod.NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork)

 	config.createPod(testContainerPod)
 	config.createPod(hostTestContainerPod)
@@ -671,7 +672,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
 	config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
 	config.EndpointPods = config.EndpointPods[1:]
 	// wait for pod being deleted.
-	err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
+	err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
 	if err != nil {
 		Failf("Failed to delete %s pod: %v", pod.Name, err)
 	}

test/e2e/framework/pod/BUILD (new file, 55 lines)
@@ -0,0 +1,55 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "resource.go",
        "runtimeobject.go",
        "wait.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/apps:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/client/conditions:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/batch/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework/log:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

test/e2e/framework/pod/resource.go (new file, 689 lines)
@@ -0,0 +1,689 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/client/conditions"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

var (
	// BusyBoxImage is the image URI of BusyBox.
	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)

// TODO: Move expectNoError and expectNoErrorWithRetries to their own subpackage within framework.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
	expectNoErrorWithOffset(1, err, explain...)
}

// TODO: Move to its own subpkg.
// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	if err != nil {
		e2elog.Logf("Unexpected error occurred: %v", err)
	}
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

// TODO: Move to its own subpkg.
// expectNoErrorWithRetries checks if an error occurs with the given retry count.
func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
	var err error
	for i := 0; i < maxRetries; i++ {
		err = fn()
		if err == nil {
			return
		}
		e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
	}
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

func isElementOf(podUID types.UID, pods *v1.PodList) bool {
	for _, pod := range pods.Items {
		if pod.UID == podUID {
			return true
		}
	}
	return false
}

// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
	c              clientset.Interface
	ns             string
	label          labels.Selector
	controllerName string
	respondName    bool // Whether the pod should respond with its own name.
	pods           *v1.PodList
}

// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
	return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}

// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
	expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}

		ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout)
		defer cancel()

		body, err := r.c.CoreV1().RESTClient().Get().
			Context(ctx).
			Namespace(r.ns).
			Resource("pods").
			SubResource("proxy").
			Name(string(pod.Name)).
			Do().
			Raw()

		if err != nil {
			if ctx.Err() != nil {
				// We may encounter errors here because of a race between the pod readiness and apiserver
				// proxy. So, we log the error and retry if this occurs.
				e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
				return false, nil
			}
			e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}
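
Because CheckAllResponses has the wait.ConditionFunc shape (no arguments, returns (bool, error)), a checker is typically driven by a poll loop; a minimal sketch assuming the exported names above (the wrapper name and the 30-second/5-minute values are illustrative, not taken from the diff):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// podsRespond polls until every pod behind the "name" label answers the API
// proxy GET with its own name. Illustrative wrapper, not part of the diff.
func podsRespond(c clientset.Interface, ns, name string, pods *v1.PodList) error {
	label := labels.SelectorFromSet(labels.Set{"name": name})
	checker := e2epod.NewProxyResponseChecker(c, ns, label, name, true, pods)
	return wait.PollImmediate(30*time.Second, 5*time.Minute, checker.CheckAllResponses)
}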

// CountRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
	// check for remaining pods
	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		return 0, 0, err
	}

	// nothing remains!
	if len(pods.Items) == 0 {
		return 0, 0, nil
	}

	// stuff remains, log about it
	LogPodStates(pods.Items)

	// check if there were any pods with missing deletion timestamp
	numPods := len(pods.Items)
	missingTimestamp := 0
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp == nil {
			missingTimestamp++
		}
	}
	return numPods, missingTimestamp, nil
}

// Initialized checks the state of all init containers in the pod.
func Initialized(pod *v1.Pod) (ok bool, failed bool, err error) {
	allInit := true
	initFailed := false
	for _, s := range pod.Status.InitContainerStatuses {
		switch {
		case initFailed && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
		case allInit && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
		case s.State.Terminated == nil:
			allInit = false
		case s.State.Terminated.ExitCode != 0:
			allInit = false
			initFailed = true
		case !s.Ready:
			return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
		}
	}
	return allInit, initFailed, nil
}

func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodRunning:
			return true, nil
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		}
		return false, nil
	}
}

func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			return true, nil
		}
		return false, nil
	}
}

func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		case v1.PodRunning:
			return podutil.IsPodReady(pod), nil
		}
		return false, nil
	}
}

func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodPending:
			return false, nil
		default:
			return true, nil
		}
	}
}

// PodsCreated returns a pod list matched by the given name.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return PodsCreatedByLabel(c, ns, name, replicas, label)
}

// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
	timeout := 2 * time.Minute
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		options := metav1.ListOptions{LabelSelector: label.String()}

		// List the pods, making sure we observe all the replicas.
		pods, err := c.CoreV1().Pods(ns).List(options)
		if err != nil {
			return nil, err
		}

		created := []v1.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if int32(len(created)) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}

// VerifyPods checks if the specified pod is responding.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}

// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}

func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
	pods, err := PodsCreated(c, ns, name, replicas)
	if err != nil {
		return err
	}
	e := podsRunning(c, pods)
	if len(e) > 0 {
		return fmt.Errorf("failed to wait for pods running: %v", e)
	}
	if checkResponding {
		err = PodsResponding(c, ns, name, wantName, pods)
		if err != nil {
			return fmt.Errorf("failed to wait for pods responding: %v", err)
		}
	}
	return nil
}

func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	ginkgo.By("ensuring each pod is running")
	e := []error{}
	errorChan := make(chan error)

	for _, pod := range pods.Items {
		go func(p v1.Pod) {
			errorChan <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}

	for range pods.Items {
		err := <-errorChan
		if err != nil {
			e = append(e, err)
		}
	}

	return e
}

// DumpAllPodInfo logs basic info for all pods.
func DumpAllPodInfo(c clientset.Interface) {
	pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		e2elog.Logf("unable to fetch pod debug info: %v", err)
	}
	LogPodStates(pods.Items)
}

// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
	// Find maximum widths for pod, node, and phase strings for column printing.
	maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
	for i := range pods {
		pod := &pods[i]
		if len(pod.ObjectMeta.Name) > maxPodW {
			maxPodW = len(pod.ObjectMeta.Name)
		}
		if len(pod.Spec.NodeName) > maxNodeW {
			maxNodeW = len(pod.Spec.NodeName)
		}
		if len(pod.Status.Phase) > maxPhaseW {
			maxPhaseW = len(pod.Status.Phase)
		}
	}
	// Increase widths by one to separate by a single space.
	maxPodW++
	maxNodeW++
	maxPhaseW++
	maxGraceW++

	// Log pod info. * does space padding, - makes them left-aligned.
	e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
		maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
	for _, pod := range pods {
		grace := ""
		if pod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
		}
		e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
			maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
	}
	e2elog.Logf("") // Final empty line helps for readability.
}

// LogPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
// why pods crashed and since it is in the API, it's fast to retrieve.
func LogPodTerminationMessages(pods []v1.Pod) {
	for _, pod := range pods {
		for _, status := range pod.Status.InitContainerStatuses {
			if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
				e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
			}
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
				e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
			}
		}
	}
}

// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		e2elog.Logf("unable to fetch pod debug info: %v", err)
	}
	LogPodStates(pods.Items)
	LogPodTerminationMessages(pods.Items)
}

// FilterNonRestartablePods filters out pods that will never get recreated if
// deleted after termination.
func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod {
	var results []*v1.Pod
	for _, p := range pods {
		if isNotRestartAlwaysMirrorPod(p) {
			// Mirror pods with restart policy == Never will not get
			// recreated if they are deleted after the pods have
			// terminated. For now, we discount such pods.
			// https://github.com/kubernetes/kubernetes/issues/34003
			continue
		}
		results = append(results, p)
	}
	return results
}

func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
	if !kubepod.IsMirrorPod(p) {
		return false
	}
	return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}

// NewExecPodSpec returns the pod spec of hostexec pod
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
	immediate := int64(0)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "hostexec",
					Image:           imageutils.GetE2EImage(imageutils.Hostexec),
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
			HostNetwork:                   hostNetwork,
			SecurityContext:               &v1.PodSecurityContext{},
			TerminationGracePeriodSeconds: &immediate,
		},
	}
	return pod
}

// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running.
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
	hostExecPod := NewExecPodSpec(ns, name, true)
	pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
	expectNoError(err)
	err = WaitForPodRunningInNamespace(client, pod)
	expectNoError(err)
	return pod
}

// newExecPodSpec returns the pod spec of exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
	immediate := int64(0)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: generateName,
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: &immediate,
			Containers: []v1.Container{
				{
					Name:    "exec",
					Image:   BusyBoxImage,
					Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"},
				},
			},
		},
	}
	return pod
}

// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
	e2elog.Logf("Creating new exec pod")
	execPod := newExecPodSpec(ns, generateName)
	if tweak != nil {
		tweak(execPod)
	}
	created, err := client.CoreV1().Pods(ns).Create(execPod)
	expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
	err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
		retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return retrievedPod.Status.Phase == v1.PodRunning, nil
	})
	expectNoError(err)
	return created.Name
}
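
CreateExecPodOrFail hands back only the pod name, since callers usually just need a target for kubectl-exec-style commands, while the optional tweak hook mutates the spec before creation. A hedged usage sketch of the two exec-pod helpers above (the node pinning is an illustrative tweak, not something the diff does):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// startExecPods creates one host-network exec pod and one plain exec pod
// pinned to a specific node. Illustrative only.
func startExecPods(c clientset.Interface, ns, node string) (hostPod *v1.Pod, execPodName string) {
	hostPod = e2epod.LaunchHostExecPod(c, ns, "hostexec")
	execPodName = e2epod.CreateExecPodOrFail(c, ns, "exec-", func(p *v1.Pod) {
		p.Spec.NodeName = node // hypothetical tweak: schedule onto a known node
	})
	return hostPod, execPodName
}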

// CreatePodOrFail creates a pod with the specified containerPorts.
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
	ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "pause",
					Image: imageutils.GetPauseImageName(),
					Ports: containerPorts,
					// Add a dummy environment variable to work around a docker issue.
					// https://github.com/docker/docker/issues/14203
					Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
				},
			},
		},
	}
	_, err := c.CoreV1().Pods(ns).Create(pod)
	expectNoError(err, "failed to create pod %s in namespace %s", name, ns)
}

// DeletePodOrFail deletes the pod of the specified namespace and name.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
	ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
	err := c.CoreV1().Pods(ns).Delete(name, nil)
	expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}

// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}

// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; use
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}

// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
	np := len(podNames)
	e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
	type waitPodResult struct {
		success bool
		podName string
	}
	result := make(chan waitPodResult, len(podNames))
	for _, podName := range podNames {
		// Launch off pod readiness checkers.
		go func(name string) {
			err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
			result <- waitPodResult{err == nil, name}
		}(podName)
	}
	// Wait for them all to finish.
	success := true
	for range podNames {
		res := <-result
		if !res.success {
			e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
			success = false
		}
	}
	e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
	return success
}

// GetPodLogs returns the logs of the specified container (namespace/pod/container).
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, false)
}

// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, true)
}

// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
	logs, err := c.CoreV1().RESTClient().Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).SubResource("log").
		Param("container", containerName).
		Param("previous", strconv.FormatBool(previous)).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs))
	}
	return string(logs), err
}
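
Both exported log getters funnel into getPodLogsInternal and differ only in the previous flag, which maps to the "previous" query parameter on the log subresource. A minimal usage sketch, assuming the exported names above (the wrapper is illustrative):

package example

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// dumpLogs prints current logs and, if the container restarted, the logs of
// the previous instance. Illustrative only.
func dumpLogs(c clientset.Interface, ns, pod, container string) error {
	logs, err := e2epod.GetPodLogs(c, ns, pod, container)
	if err != nil {
		return err
	}
	fmt.Println(logs)
	// Previous-instance logs are best-effort: absent if nothing restarted.
	if prev, err := e2epod.GetPreviousPodLogs(c, ns, pod, container); err == nil {
		fmt.Println(prev)
	}
	return nil
}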

// GetPodsInNamespace returns the pods in the given namespace.
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
	pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return []*v1.Pod{}, err
	}
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	filtered := []*v1.Pod{}
	for i := range pods.Items {
		p := &pods.Items[i] // take the address of the slice element, not of the loop variable
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered, nil
}

// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
				scheduledPods = append(scheduledPods, pod)
			} else {
				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
				if scheduledCondition.Reason == "Unschedulable" {
					notScheduledPods = append(notScheduledPods, pod)
				}
			}
		}
	}
	return
}

test/e2e/framework/pod/runtimeobject.go (new file, 124 lines)
@@ -0,0 +1,124 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	batch "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
	api "k8s.io/kubernetes/pkg/apis/core"
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
)

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
		return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
		return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("DaemonSet"):
		return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
	case batchinternal.Kind("Job"):
		return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
	default:
		return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
	}
}

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
	switch typed := obj.(type) {
	case *v1.ReplicationController:
		return labels.SelectorFromSet(typed.Spec.Selector), nil
	case *extensions.ReplicaSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *apps.ReplicaSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *extensions.Deployment:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *apps.Deployment:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *extensions.DaemonSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *apps.DaemonSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *batch.Job:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	default:
		return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
	}
}

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
	switch typed := obj.(type) {
	case *v1.ReplicationController:
		if typed.Spec.Replicas != nil {
			return *typed.Spec.Replicas, nil
		}
		return 0, nil
	case *extensions.ReplicaSet:
		if typed.Spec.Replicas != nil {
			return *typed.Spec.Replicas, nil
		}
		return 0, nil
	case *apps.ReplicaSet:
		if typed.Spec.Replicas != nil {
			return *typed.Spec.Replicas, nil
		}
		return 0, nil
	case *extensions.Deployment:
		if typed.Spec.Replicas != nil {
			return *typed.Spec.Replicas, nil
		}
		return 0, nil
	case *apps.Deployment:
		if typed.Spec.Replicas != nil {
			return *typed.Spec.Replicas, nil
		}
		return 0, nil
	case *extensions.DaemonSet:
		return 0, nil
	case *apps.DaemonSet:
		return 0, nil
	case *batch.Job:
		// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
		// that actually finish we need a better way to do this.
		if typed.Spec.Parallelism != nil {
			return *typed.Spec.Parallelism, nil
		}
		return 0, nil
	default:
		return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
	}
}
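
These three helpers are unexported, so they only compose inside the pod package; a sketch of the intended composition, written as if it lived in this package and using this file's imports (the helper name waitForPodsOfKind is hypothetical, not part of the commit):

// waitForPodsOfKind resolves a controller by kind/name, derives its label
// selector and replica count, and returns the matching pods once they all
// exist. Hypothetical in-package helper, shown for illustration.
func waitForPodsOfKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (*v1.PodList, error) {
	obj, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		return nil, err
	}
	selector, err := getSelectorFromRuntimeObject(obj)
	if err != nil {
		return nil, err
	}
	replicas, err := getReplicasFromRuntimeObject(obj)
	if err != nil {
		return nil, err
	}
	// PodsCreatedByLabel (resource.go, same package) polls until the
	// expected number of undeleted pods is observed.
	return PodsCreatedByLabel(c, ns, name, replicas, selector)
}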

test/e2e/framework/pod/wait.go (new file, 651 lines)
@@ -0,0 +1,651 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

	"github.com/onsi/ginkgo"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

const (
	// defaultPodDeletionTimeout is the default timeout for deleting pod.
	defaultPodDeletionTimeout = 3 * time.Minute

	// podListTimeout is how long to wait for the pod to be listable.
	podListTimeout = time.Minute

	podRespondingTimeout = 15 * time.Minute

	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)

	// podStartTimeout is how long to wait for the pod to be started.
	// Initial pod start can be delayed O(minutes) by slow docker pulls.
	// TODO: Make this 30 seconds once #4566 is resolved.
	podStartTimeout = 5 * time.Minute

	// poll is how often to poll pods, nodes and claims.
	poll             = 2 * time.Second
	pollShortTimeout = 1 * time.Minute
	pollLongTimeout  = 5 * time.Minute

	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	singleCallTimeout = 5 * time.Minute

	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
)

type podCondition func(pod *v1.Pod) (bool, error)

// errorBadPodsStates creates an error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
	// Print bad pods info only if there are at most 10 bad pods
	if len(badPods) > 10 {
		return errStr + "There are too many bad pods. Please check log for details."
	}

	buf := bytes.NewBuffer(nil)
	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
	for _, badPod := range badPods {
		grace := ""
		if badPod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
		}
		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
		fmt.Fprintln(w, podInfo)
	}
	w.Flush()
	return errStr + buf.String()
}

// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	// Build the ignore selector from ignoreLabels; an empty selector would match every pod.
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	start := time.Now()
	e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
	wg := sync.WaitGroup{}
	wg.Add(1)
	var ignoreNotReady bool
	badPods := []v1.Pod{}
	desiredPods := 0
	notReady := int32(0)

	if wait.PollImmediate(poll, timeout, func() (bool, error) {
		// We get the new list of pods, replication controllers, and
		// replica sets in every iteration because more pods come
		// online during startup and we want to ensure they are also
		// checked.
		replicas, replicaOk := int32(0), int32(0)

		rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
		if err != nil {
			e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, rc := range rcList.Items {
			replicas += *rc.Spec.Replicas
			replicaOk += rc.Status.ReadyReplicas
		}

		rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{})
		if err != nil {
			e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, rs := range rsList.Items {
			replicas += *rs.Spec.Replicas
			replicaOk += rs.Status.ReadyReplicas
		}

		podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
		if err != nil {
			e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		nOk := int32(0)
		notReady = int32(0)
		badPods = []v1.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
				continue
			}
			res, err := testutils.PodRunningReady(&pod)
			switch {
			case res && err == nil:
				nOk++
			case pod.Status.Phase == v1.PodSucceeded:
				e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
				// it doesn't make sense to wait for this pod
				continue
			case pod.Status.Phase != v1.PodFailed:
				e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
				notReady++
				badPods = append(badPods, pod)
			default:
				if metav1.GetControllerOf(&pod) == nil {
					e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
					badPods = append(badPods, pod)
				}
				// ignore failed pods that are controlled by some controller
			}
		}

		e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
			nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
		e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)

		if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
			return true, nil
		}
		ignoreNotReady = (notReady <= allowedNotReadyPods)
		LogPodStates(badPods)
		return false, nil
	}) != nil {
		if !ignoreNotReady {
			return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
		}
		e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
	}
	return nil
}
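
A test suite typically gates on this at startup, before running anything else; a minimal sketch assuming the exported signature above (the namespace, ignore label, and 10-minute timeout are illustrative):

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// clusterReady blocks until kube-system has at least minPods running and
// ready, tolerating zero not-ready pods. Illustrative only.
func clusterReady(c clientset.Interface, minPods int32) error {
	ignore := map[string]string{"cleanup": "true"} // hypothetical ignore label
	return e2epod.WaitForPodsRunningReady(c, "kube-system", minPods, 0, 10*time.Minute, ignore)
}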

// WaitForPodCondition waits for a pod to match the given condition.
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
	e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		if err != nil {
			if apierrs.IsNotFound(err) {
				e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
				return err
			}
			e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
			continue
		}
		// log now so that current pod info is reported before calling `condition()`
		e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
			podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
		if done, err := condition(pod); done {
			if err == nil {
				e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
			}
			return err
		}
	}
	return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
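
Because the condition parameter is just a func(*v1.Pod) (bool, error), a function literal from any package satisfies it, so callers can wait on arbitrary pod state; a sketch waiting for a pod IP (the description string and timeout are illustrative):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForPodIP waits until the pod reports a PodIP in its status.
// Illustrative only.
func waitForPodIP(c clientset.Interface, ns, name string) error {
	return e2epod.WaitForPodCondition(c, ns, name, "has an IP", 2*time.Minute,
		func(pod *v1.Pod) (bool, error) {
			return pod.Status.PodIP != "", nil
		})
}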
|
||||
|
||||
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
|
||||
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
|
||||
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
|
||||
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
|
||||
// the supplied reason.
|
||||
func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
// Only consider Failed pods. Successful pods will be deleted and detected in
|
||||
// waitForPodCondition's Get call returning `IsNotFound`
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
|
||||
return true, nil
|
||||
}
|
||||
return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
|
||||
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
|
||||
}
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodSucceeded:
|
||||
ginkgo.By("Saw pod success")
|
||||
return true, nil
|
||||
case v1.PodFailed:
|
||||
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
|
||||
// and have condition Status equal to Unschedulable,
|
||||
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
|
||||
// Typically called to test that the passed-in pod is Pending and Unschedulable.
|
||||
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "Unschedulable", podStartTimeout, func(pod *v1.Pod) (bool, error) {
|
||||
// Only consider Failed pods. Successful pods will be deleted and detected in
|
||||
// waitForPodCondition's Get call returning `IsNotFound`
|
||||
if pod.Status.Phase == v1.PodPending {
|
||||
for _, cond := range pod.Status.Conditions {
|
||||
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
|
||||
// waits and checks if all match pods are in the given podCondition
|
||||
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conditionNotMatch := []string{}
|
||||
for _, pod := range pods.Items {
|
||||
done, err := condition(&pod)
|
||||
if done && err != nil {
|
||||
return fmt.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !done {
|
||||
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
|
||||
}
|
||||
}
|
||||
if len(conditionNotMatch) <= 0 {
|
||||
return err
|
||||
}
|
||||
e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
|
||||
}
|
||||
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
|
||||
}
|
||||
|
||||
// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running.
|
||||
// Returns an error if timeout occurs first, or pod goes in to failed state.
|
||||
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
|
||||
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout)
|
||||
}
|
||||
|
||||
// WaitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into the failed state.
func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
	return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}

// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace))
}

// WaitForPodRunningInNamespace waits the default amount of time (podStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into the failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
	if pod.Status.Phase == v1.PodRunning {
		return nil
	}
	return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, podStartTimeout)
}

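A minimal sketch of the typical create-then-wait flow; the helper and its busybox spec are illustrative assumptions:

// runBusyboxExample is a hypothetical sketch of the create-then-wait pattern.
func runBusyboxExample(c clientset.Interface, ns string) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-busybox"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "busybox",
				Image:   "busybox",
				Command: []string{"sleep", "3600"},
			}},
		},
	}
	pod, err := c.CoreV1().Pods(ns).Create(pod)
	if err != nil {
		return err
	}
	// Blocks until the pod is Running, or errors out after podStartTimeout.
	return WaitForPodRunningInNamespace(c, pod)
}
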
// WaitTimeoutForPodEvent waits the given timeout duration for a pod event to occur.
func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg))
}

func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg string) wait.ConditionFunc {
	options := metav1.ListOptions{FieldSelector: eventSelector}
	return func() (bool, error) {
		events, err := c.CoreV1().Events(namespace).List(options)
		if err != nil {
			return false, fmt.Errorf("got error while getting pod events: %s", err)
		}
		for _, event := range events.Items {
			if strings.Contains(event.Message, msg) {
				return true, nil
			}
		}
		return false, nil
	}
}

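A sketch of how an event selector might be built for the helper above; the "Killing" reason and message fragment are assumptions, and k8s.io/apimachinery/pkg/fields is assumed imported:

// waitForKillingEventExample is a hypothetical sketch: wait for a "Killing"
// event on the pod whose message mentions the stopped container.
func waitForKillingEventExample(c clientset.Interface, ns, podName string) error {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      podName,
		"involvedObject.namespace": ns,
		"reason":                   "Killing",
	}.AsSelector().String()
	return WaitTimeoutForPodEvent(c, podName, ns, selector, "Stopping container", 2*time.Minute)
}
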
// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop.
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podCompleted(c, podName, namespace))
}

// WaitForPodNoLongerRunningInNamespace waits the default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if the timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout)
}

// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
}

// WaitForPodNotPending returns an error if it takes too long for the pod to leave the Pending state.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
	return wait.PollImmediate(poll, podStartTimeout, podNotPending(c, podName, ns))
}

// WaitForPodSuccessInNamespace returns nil if the pod reached the Succeeded state, or an error if it
// reached the Failed state or did not succeed within podStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}

// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached the Succeeded state, or an error if it
// reached the Failed state or did not succeed within slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}

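An illustrative one-shot workload; the pod name stands in for whatever setup pod a test creates:

// waitForFixtureLoaderExample is a hypothetical sketch: a batch-style pod that
// loads fixtures and exits 0 marks its work done by reaching Succeeded.
func waitForFixtureLoaderExample(c clientset.Interface, ns string) error {
	if err := WaitForPodSuccessInNamespace(c, "fixture-loader", ns); err != nil {
		return fmt.Errorf("fixture pod did not succeed: %v", err)
	}
	return nil
}
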
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `WaitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// API returns IsNotFound then the wait stops and nil is returned. If the Get API returns an error other
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		if apierrs.IsNotFound(err) {
			return true, nil // done
		}
		if err != nil {
			return true, err // stop wait with error
		}
		return false, nil
	})
}

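A delete-then-wait sketch using the helper above; the function itself is hypothetical:

// deleteAndConfirmGoneExample is a hypothetical sketch: issue the delete, then
// block until the API stops returning the pod at all.
func deleteAndConfirmGoneExample(c clientset.Interface, ns, podName string) error {
	if err := c.CoreV1().Pods(ns).Delete(podName, &metav1.DeleteOptions{}); err != nil {
		return err
	}
	return WaitForPodNotFoundInNamespace(c, podName, ns, 2*time.Minute)
}
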
// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		e2elog.Logf("Waiting for pod %s to disappear", podName)
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err := c.CoreV1().Pods(ns).List(options)
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		found := false
		for _, pod := range pods.Items {
			if pod.Name == podName {
				e2elog.Logf("Pod %s still exists", podName)
				found = true
				break
			}
		}
		if !found {
			e2elog.Logf("Pod %s no longer exists", podName)
			return true, nil
		}
		return false, nil
	})
}

// PodsResponding waits for the pods to respond.
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
	ginkgo.By("trying to dial each unique pod")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}

// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
	rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		return err
	}
	selector, err := getSelectorFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	replicas, err := getReplicasFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
	if err != nil {
		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
	}
	return nil
}

// WaitForControlledPods waits up to podListTimeout to get the pods of the specified controller name, and returns them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
	rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		return nil, err
	}
	selector, err := getSelectorFromRuntimeObject(rtObject)
	if err != nil {
		return nil, err
	}
	return WaitForPodsWithLabel(c, ns, selector)
}

// WaitForPodsWithLabelScheduled waits for all matching pods to be scheduled and for
// at least one matching pod to exist. Returns the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
	err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
		func() (bool, error) {
			pods, err = WaitForPodsWithLabel(c, ns, label)
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if pod.Spec.NodeName == "" {
					return false, nil
				}
			}
			return true, nil
		})
	return pods, err
}

// WaitForPodsWithLabel waits up to podListTimeout to get pods with a certain label.
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err = c.CoreV1().Pods(ns).List(options)
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				continue
			}
			return
		}
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
	}
	return
}

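Illustrative usage; the "app=server" label set and the helper itself are assumptions for the example:

// listServerPodsExample is a hypothetical sketch: block until at least one
// labeled pod exists, then log where each one landed.
func listServerPodsExample(c clientset.Interface, ns string) error {
	label := labels.SelectorFromSet(labels.Set{"app": "server"})
	pods, err := WaitForPodsWithLabel(c, ns, label)
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		e2elog.Logf("found pod %s on node %s", pod.Name, pod.Spec.NodeName)
	}
	return nil
}
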
// WaitForPodsWithLabelRunningReady waits for an exact number of matching pods to become running and ready.
// Returns the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
	var current int
	err = wait.Poll(poll, timeout,
		func() (bool, error) {
			pods, err = WaitForPodsWithLabel(c, ns, label)
			if err != nil {
				e2elog.Logf("Failed to list pods: %v", err)
				if testutils.IsRetryableAPIError(err) {
					return false, nil
				}
				return false, err
			}
			current = 0
			for _, pod := range pods.Items {
				if flag, err := testutils.PodRunningReady(&pod); err == nil && flag {
					current++
				}
			}
			if current != num {
				e2elog.Logf("Got %v pods running and ready, expect: %v", current, num)
				return false, nil
			}
			return true, nil
		})
	return pods, err
}

// WaitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
	var activePods []*v1.Pod
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pods := ps.List()
		activePods = nil
		for _, pod := range pods {
			if controller.IsPodActive(pod) {
				activePods = append(activePods, pod)
			}
		}

		if len(activePods) != 0 {
			return false, nil
		}
		return true, nil
	})

	if err == wait.ErrWaitTimeout {
		for _, pod := range activePods {
			e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
		}
		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
	}
	return err
}

// WaitForPodsGone waits until there are no pods left in the PodStore.
func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
	var pods []*v1.Pod
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		if pods = ps.List(); len(pods) == 0 {
			return true, nil
		}
		return false, nil
	})

	if err == wait.ErrWaitTimeout {
		for _, pod := range pods {
			e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
		}
		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
	}
	return err
}

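A sketch of how the two PodStore waits above compose after tearing down a controller; the helper and its intervals are illustrative:

// waitForTeardownExample is a hypothetical sketch: first let pods leave the
// active state, then require that they disappear entirely.
func waitForTeardownExample(ps *testutils.PodStore) error {
	if err := WaitForPodsInactive(ps, 10*time.Second, 5*time.Minute); err != nil {
		return err
	}
	return WaitForPodsGone(ps, 10*time.Second, 5*time.Minute)
}
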
// WaitForPodsReady waits for the pods to become ready.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	options := metav1.ListOptions{LabelSelector: label.String()}
	return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(options)
		if err != nil {
			return false, nil
		}
		for _, pod := range pods.Items {
			if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
				return false, nil
			}
		}
		return true, nil
	})
}

// WaitForNRestartablePods tries to list restarting pods using ps until it finds `expect` of them,
// returning their names if it can do so before timeout.
func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
	var pods []*v1.Pod
	var errLast error
	found := wait.Poll(poll, timeout, func() (bool, error) {
		allPods := ps.List()
		pods = FilterNonRestartablePods(allPods)
		if len(pods) != expect {
			errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
			e2elog.Logf("Error getting pods: %v", errLast)
			return false, nil
		}
		return true, nil
	}) == nil
	podNames := make([]string, len(pods))
	for i, p := range pods {
		podNames[i] = p.ObjectMeta.Name
	}
	if !found {
		return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
			expect, timeout, errLast)
	}
	return podNames, nil
}
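
A hedged sketch of the node-restart pattern this helper serves; the namespace, expected count, and timeout are assumptions (k8s.io/apimachinery/pkg/fields assumed imported):

// restartSurvivorsExample is a hypothetical sketch: snapshot restartable system
// pods via a PodStore and expect the same count to return after a restart.
func restartSurvivorsExample(c clientset.Interface, expected int) ([]string, error) {
	ps, err := testutils.NewPodStore(c, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	defer ps.Stop()
	return WaitForNRestartablePods(ps, expected, 10*time.Minute)
}
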
@@ -34,6 +34,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/sysctl"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
@@ -110,7 +111,7 @@ func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod {
// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
	p := c.Create(pod)
	ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
	ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
	// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
	p, err := c.Get(p.Name, metav1.GetOptions{})
	ExpectNoError(err)
@@ -174,7 +175,7 @@ func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options
	if err != nil && !errors.IsNotFound(err) {
		Failf("Failed to delete pod %q: %v", name, err)
	}
	gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
	gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
		2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}

@@ -218,7 +219,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
	f := c.f
	gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
	gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
		func(pod *v1.Pod) (bool, error) {
			switch pod.Status.Phase {
			case v1.PodFailed:
@@ -235,7 +236,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
// WaitForFailure waits for pod to fail.
func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
	f := c.f
	gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
	gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
		func(pod *v1.Pod) (bool, error) {
			switch pod.Status.Phase {
			case v1.PodFailed:
@@ -252,7 +253,7 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
	f := c.f
	gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
	gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
		func(pod *v1.Pod) (bool, error) {
			switch pod.Status.Phase {
			case v1.PodFailed:
@@ -293,7 +294,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
// MatchContainerOutput gets output of a container and match expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
	f := c.f
	output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
	output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
	if err != nil {
		return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
	}

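A hedged sketch of the migrated PodClient flow from a test's point of view; newEchoPod is a hypothetical helper, not part of this change:

// echoPodExample is a hypothetical sketch: create a short-lived pod without
// waiting for Running (it may exit immediately), then require success.
func echoPodExample(f *framework.Framework) {
	podClient := f.PodClient()
	pod := podClient.Create(newEchoPod("echo-pod", "hello")) // newEchoPod: assumed helper
	podClient.WaitForSuccess(pod.Name, framework.PodStartTimeout)
}
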
@@ -24,6 +24,7 @@ go_library(
    "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
    "//test/e2e/framework:go_default_library",
    "//test/e2e/framework/log:go_default_library",
    "//test/e2e/framework/pod:go_default_library",
    "//test/utils:go_default_library",
    "//vendor/github.com/onsi/ginkgo:go_default_library",
    "//vendor/google.golang.org/api/compute/v1:go_default_library",

@@ -28,6 +28,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	testutils "k8s.io/kubernetes/test/utils"
)

@@ -57,13 +58,13 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {

		ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
		allPods := ps.List()
		originalPods := framework.FilterNonRestartablePods(allPods)
		originalPods := e2epod.FilterNonRestartablePods(allPods)
		originalPodNames = make([]string, len(originalPods))
		for i, p := range originalPods {
			originalPodNames[i] = p.ObjectMeta.Name
		}

		if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
		}

@@ -114,10 +115,10 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace

	// Make sure the pods from before node recreation are running/completed
	podCheckStart := time.Now()
	podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
	podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
	framework.ExpectNoError(err)
	remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
	if !framework.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
	if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
		framework.Failf("At least one pod wasn't running and ready after the restart.")
	}
}

@@ -32,6 +32,7 @@ import (
	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
	"k8s.io/kubernetes/pkg/volume/util"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -501,7 +502,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
// Tests that the pod's exit code is zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
	ginkgo.By("Pod should terminate with exitcode 0 (success)")
	if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
	if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
	}
	e2elog.Logf("Pod %v succeeded ", pod.Name)
@@ -856,7 +857,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
		return nil, fmt.Errorf("pod Create API error: %v", err)
	}
	// Waiting for pod to be running
	err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
	err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
	}
@@ -876,7 +877,7 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
		return nil, fmt.Errorf("pod Create API error: %v", err)
	}
	// Waiting for pod to be running
	err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
	err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
	}
@@ -907,7 +908,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string,
	}

	// Waiting for pod to be running
	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
	err = e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
	}
@@ -962,7 +963,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe
		return nil, fmt.Errorf("pod Create API error: %v", err)
	}
	// Waiting for pod to become Unschedulable
	err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
	err = e2epod.WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
	}

@@ -31,6 +31,7 @@ import (
	scaleclient "k8s.io/client-go/scale"
	api "k8s.io/kubernetes/pkg/apis/core"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	testutils "k8s.io/kubernetes/test/utils"
)

@@ -137,7 +138,7 @@ func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string)
	// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
	// The grace period must be set to 0 on the pod for it to be deleted during the partition.
	// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
	return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
	return e2epod.WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}

// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)

@@ -40,6 +40,7 @@ import (
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -818,7 +819,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s

func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error {
	timeout := 2 * time.Minute
	if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
	if !e2epod.CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
		return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
	}
	return nil
@@ -1303,9 +1304,9 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er
// in the cluster. Each pod in the service is expected to echo its name. These
// names are compared with the given expectedPods list after a sort | uniq.
func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
	execPodName := CreateExecPodOrFail(c, ns, "execpod-", nil)
	execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
	defer func() {
		DeletePodOrFail(c, ns, execPodName)
		e2epod.DeletePodOrFail(c, ns, execPodName)
	}()

	// Loop a bunch of times - the proxy is randomized, so we want a good

@@ -39,6 +39,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/manifest"
	imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -279,7 +280,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
	podList := s.GetPodList(ss)
	statefulPodCount := len(podList.Items)
	if statefulPodCount != count {
		logPodStates(podList.Items)
		e2epod.LogPodStates(podList.Items)
		if hard {
			Failf("StatefulSet %v scaled unexpectedly to %d -> %d replicas", ss.Name, count, len(podList.Items))
		} else {

File diff suppressed because it is too large
@@ -14,6 +14,7 @@ go_library(
    "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
    "//test/e2e/framework:go_default_library",
    "//test/e2e/framework/log:go_default_library",
    "//test/e2e/framework/pod:go_default_library",
    "//test/utils/image:go_default_library",
    "//vendor/github.com/onsi/ginkgo:go_default_library",
    "//vendor/github.com/onsi/gomega:go_default_library",

@@ -53,6 +53,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
@@ -364,10 +365,10 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
		}
	}
	if config.WaitForCompletion {
		framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
		framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
		framework.ExpectNoError(podClient.Delete(serverPod.Name, nil))
	} else {
		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))
		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
		if pod == nil {
			ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
			pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
@@ -487,7 +488,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
		framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)

	}
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))

	ginkgo.By("Checking that text file contents are perfect.")
	for i, test := range tests {
@@ -568,7 +569,7 @@ func InjectHTML(client clientset.Interface, config TestConfig, fsGroup *int64, v
	injectPod, err := podClient.Create(injectPod)
	framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
	err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
	err = e2epod.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
	framework.ExpectNoError(err)
}
