Merge pull request #86199 from hwdef/clean-e2e-framework

test/e2e: move funcs from test/e2e/pod to other folders
Kubernetes Prow Robot
2019-12-16 21:39:03 -08:00
committed by GitHub
10 changed files with 127 additions and 167 deletions


@@ -14,7 +14,6 @@ go_library(
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -23,7 +22,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",


@@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -33,40 +32,6 @@ var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
// CreateWaitAndDeletePod creates the test pod, waits for (hopefully) success, and then deletes the pod.
// Note: a named return value is needed so that the err assignment in the defer sets the returned error.
// This has been shown to be necessary with Go 1.7.
func CreateWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
e2elog.Logf("Creating nfs test pod")
pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
if err != nil {
return fmt.Errorf("pod Create API error: %v", err)
}
defer func() {
delErr := DeletePodWithWait(c, runPod)
if err == nil { // don't override previous err value
err = delErr // assign to returned err, can be nil
}
}()
err = testPodSuccessOrFail(c, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
}
return // note: named return value
}
// testPodSuccessOrFail tests whether the pod's exit code is zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
e2elog.Logf("Pod should terminate with exitcode 0 (success)")
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
e2elog.Logf("Pod %v succeeded ", pod.Name)
return nil
}
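For illustration only (not part of this diff): a hypothetical test helper showing the intended call pattern for CreateWaitAndDeletePod, assuming the package keeps its usual e2epod import alias and that the write command and mount path match whatever MakePod sets up.
// verifyPVCWrite is a hypothetical helper; the command and mount path are assumptions.
func verifyPVCWrite(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) error {
	writeCmd := "echo 'hello world' > /mnt/volume1/index.html"
	return e2epod.CreateWaitAndDeletePod(c, ns, pvc, writeCmd)
}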
// CreateUnschedulablePod creates a pod with the given claims based on the node selector.
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)


@@ -30,7 +30,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -147,33 +146,6 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
return true, nil
}
// CountRemainingPods queries the server to count the number of remaining pods and the number of pods that are missing a deletion timestamp.
func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}
// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}
// stuff remains, log about it
LogPodStates(pods.Items)
// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}
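A hedged sketch (not from this commit) of how teardown code could report leftovers with CountRemainingPods; the helper name and log wording are assumptions.
// reportRemainingPods is a hypothetical helper used after namespace deletion.
func reportRemainingPods(c clientset.Interface, ns string) {
	remaining, missingTimestamp, err := e2epod.CountRemainingPods(c, ns)
	if err != nil {
		framework.Logf("unable to count remaining pods in %s: %v", ns, err)
		return
	}
	if remaining > 0 {
		framework.Logf("%d pods remain in %s, %d of them without a deletion timestamp", remaining, ns, missingTimestamp)
	}
}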
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
@@ -321,15 +293,6 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
return e
}
// DumpAllPodInfo logs basic info for all pods.
func DumpAllPodInfo(c clientset.Interface) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
return
}
LogPodStates(pods.Items)
}
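One plausible use (not shown in this diff) is dumping pod info when a spec fails; a minimal sketch, assuming Ginkgo v1's CurrentGinkgoTestDescription.
// dumpOnFailure is a hypothetical AfterEach body.
func dumpOnFailure(c clientset.Interface) {
	if ginkgo.CurrentGinkgoTestDescription().Failed {
		e2epod.DumpAllPodInfo(c)
	}
}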
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
@@ -578,40 +541,3 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
}
return filtered, nil
}
// GetPodsScheduled returns the scheduled and not scheduled pods from the given list, ignoring pods on master nodes.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
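Not part of the commit: a sketch of how a scheduling test might feed GetPodsScheduled, assuming masterNodes was collected earlier and the pre-context-aware client used elsewhere in this file.
// logSchedulingSplit is a hypothetical helper.
func logSchedulingSplit(c clientset.Interface, masterNodes sets.String) {
	allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	scheduled, notScheduled := e2epod.GetPodsScheduled(masterNodes, allPods)
	framework.Logf("%d pods scheduled, %d pods not scheduled", len(scheduled), len(notScheduled))
}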
// PatchContainerImages replaces the registry in each container image with a custom
// one provided via the KUBE_TEST_REPO_LIST env variable.
func PatchContainerImages(containers []v1.Container) error {
var err error
// Index into the slice so the rewritten image URL is stored back into the caller's containers.
for i := range containers {
containers[i].Image, err = imageutils.ReplaceRegistryInImageURL(containers[i].Image)
if err != nil {
return err
}
}
return nil
}
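For reference (not in this diff), a caller would typically patch both regular and init containers in a pod spec before creating it; the helper below is hypothetical.
// patchPodImages is a hypothetical helper.
func patchPodImages(pod *v1.Pod) error {
	if err := e2epod.PatchContainerImages(pod.Spec.Containers); err != nil {
		return err
	}
	// Init containers are usually rewritten the same way.
	return e2epod.PatchContainerImages(pod.Spec.InitContainers)
}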


@@ -34,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@@ -536,49 +535,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
return pods, err
}
// WaitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
var activePods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
activePods = controller.FilterActivePods(pods)
if len(activePods) != 0 {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
for _, pod := range activePods {
e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
}
return err
}
// WaitForPodsGone waits until there are no pods left in the PodStore.
func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
var pods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods = ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
for _, pod := range pods {
e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
}
return err
}
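A hedged sketch (not in the commit) of the sequence callers used before this move: first wait for the pods to become inactive, then for them to disappear; the PodStore, interval, and timeouts are assumptions.
// waitForControllerPodsCleanup is a hypothetical pre-move caller.
func waitForControllerPodsCleanup(ps *testutils.PodStore) error {
	if err := e2epod.WaitForPodsInactive(ps, 10*time.Second, 10*time.Minute); err != nil {
		return fmt.Errorf("pods did not become inactive: %v", err)
	}
	return e2epod.WaitForPodsGone(ps, 10*time.Second, 20*time.Minute)
}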
// WaitForPodsReady waits for the pods to become ready.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))


@@ -1237,7 +1237,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
timeout = timeout + 3*time.Minute
}
err = e2epod.WaitForPodsInactive(ps, interval, timeout)
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
@@ -1247,13 +1247,56 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
// In gce, at any point, small percentage of nodes can disappear for
// ~10 minutes due to hostError. 20 minutes should be long enough to
// restart VM in that case and delete the pod.
err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute)
err = waitForPodsGone(ps, interval, 20*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
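As a usage reference (not part of the commit), a test deleting a ReplicationController and waiting for garbage collection calls the exported wrapper roughly as below; c, ns, and rcName are assumptions.
// deleteRCAndWaitForGC is a hypothetical caller of DeleteResourceAndWaitForGC.
func deleteRCAndWaitForGC(c clientset.Interface, ns, rcName string) {
	gk := schema.GroupKind{Kind: "ReplicationController"} // core API group
	framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, gk, ns, rcName))
}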
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
var pods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods = ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
for _, pod := range pods {
Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
}
return err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
var activePods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
activePods = controller.FilterActivePods(pods)
if len(activePods) != 0 {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
for _, pod := range activePods {
Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
}
return err
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {