clean up test code
@@ -19,20 +19,18 @@ limitations under the License.
 package e2e_node
 
 import (
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-
 	"fmt"
 	"os/exec"
+	"time"
 
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/test/e2e/framework"
+	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	"k8s.io/api/core/v1"
-	testutils "k8s.io/kubernetes/test/utils"
-	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
 // waitForPods waits for timeout duration, for pod_count.
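The import hunk above is the heart of the cleanup: the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log is dropped, and the test calls the equivalent Printf-style helpers on the framework package directly. A minimal standalone sketch of the call-site change (the package and function names below are hypothetical, used only for illustration):

package e2enode_sketch // hypothetical package, not part of the diff

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// logListError shows the call-site pattern after the cleanup: framework.Logf
// and framework.Failf replace e2elog.Logf and e2elog.Failf, so the separate
// e2elog import alias is no longer needed.
func logListError(err error) {
	// Before this commit: e2elog.Logf("Failed to list pods on node: %v", err)
	framework.Logf("Failed to list pods on node: %v", err)
}
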
@@ -41,7 +39,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
 		podList, err := f.PodClient().List(metav1.ListOptions{})
 		if err != nil {
-			e2elog.Logf("Failed to list pods on node: %v", err)
+			framework.Logf("Failed to list pods on node: %v", err)
 			continue
 		}
 
@@ -52,7 +50,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
 			}
 			runningPods = append(runningPods, &pod)
 		}
-		e2elog.Logf("Running pod count %d", len(runningPods))
+		framework.Logf("Running pod count %d", len(runningPods))
 		if len(runningPods) >= pod_count {
 			break
 		}
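The two waitForPods hunks above show only fragments of the polling loop. The sketch below approximates how the pieces fit together after this change, using the imports from the file's import block; the pod filter is an assumed simplification (the diff does not show the test's actual Running/Ready predicate), so the phase check is an illustrative stand-in rather than the real code.

// waitForPodsSketch approximates the structure of waitForPods after the
// cleanup; only the lines visible in the hunks are verbatim, the pod filter
// below is a simplified assumption.
func waitForPodsSketch(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
		podList, err := f.PodClient().List(metav1.ListOptions{})
		if err != nil {
			framework.Logf("Failed to list pods on node: %v", err)
			continue
		}

		runningPods = []*v1.Pod{}
		for i := range podList.Items {
			pod := podList.Items[i]
			// Assumed simplification: count any pod reported as Running.
			if pod.Status.Phase != v1.PodRunning {
				continue
			}
			runningPods = append(runningPods, &pod)
		}
		framework.Logf("Running pod count %d", len(runningPods))
		if len(runningPods) >= pod_count {
			break
		}
	}
	return runningPods
}
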
@@ -91,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 			// startTimeout fit on the node and the node is now saturated.
 			runningPods := waitForPods(f, podCount, startTimeout)
 			if len(runningPods) < minPods {
-				e2elog.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
+				framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
 			}
 
 			for i := 0; i < restartCount; i += 1 {
@@ -114,7 +112,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 					return nil
 				}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())
 				if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
-					e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
+					framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
 				}
 				// Assume that container runtime will be restarted by systemd/supervisord etc.
 				time.Sleep(20 * time.Second)
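The hunk above sits inside a gomega.Eventually block that first resolves the container runtime's PID and only then sends the kill signal, relying on systemd/supervisord to bring the runtime back. A self-contained sketch of that pattern follows; the pidof-based lookup of a dockerd process is an assumption for illustration only (the diff does not show how the test actually discovers the PID), and it uses the file's imports plus strconv and strings.

// killRuntimeSketch polls until a runtime PID can be resolved, then signals
// the process so its supervisor restarts it. The "pidof dockerd" lookup is
// an assumed stand-in for the test's real PID discovery.
func killRuntimeSketch() {
	var pid int
	gomega.Eventually(func() error {
		out, err := exec.Command("pidof", "dockerd").CombinedOutput()
		if err != nil {
			return err
		}
		fields := strings.Fields(string(out))
		if len(fields) == 0 {
			return fmt.Errorf("no container runtime process found")
		}
		pid, err = strconv.Atoi(fields[0])
		return err
	}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())

	if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
		framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
	}
	// Assume the supervisor (systemd etc.) restarts the runtime; give it time.
	time.Sleep(20 * time.Second)
}
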
@@ -123,12 +121,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 			ginkgo.By("Checking currently Running/Ready pods")
 			postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
 			if len(postRestartRunningPods) == 0 {
-				e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
+				framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
 			}
 			ginkgo.By("Confirm no containers have terminated")
 			for _, pod := range postRestartRunningPods {
 				if c := testutils.TerminatedContainers(pod); len(c) != 0 {
-					e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
+					framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
 				}
 			}
 			ginkgo.By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))