e2e_node: clean up non-recommended import
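Background, not part of the commit: a dot import such as . "github.com/onsi/ginkgo" merges every exported identifier of that package into the importing file, so calls like It(...), By(...), or Eventually(...) give no hint of where they are declared, and golint reports them as "should not use dot imports". A minimal hypothetical fragment in the old style (package and spec names are illustrative only, not taken from the commit):

package restart_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// With dot imports, It and Expect read like local functions of this package.
var _ = It("hides where its DSL calls come from", func() {
	Expect(true).To(BeTrue())
})

The diff below rewrites exactly these call shapes to the package-qualified form (ginkgo.It, ginkgo.By, gomega.Eventually) without changing behavior.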
@@ -28,8 +28,8 @@ import (
 	"fmt"
 	"os/exec"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	"k8s.io/api/core/v1"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -78,12 +78,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 	)

 	f := framework.NewDefaultFramework("restart-test")
-	Context("Container Runtime", func() {
-		Context("Network", func() {
-			It("should recover from ip leak", func() {
+	ginkgo.Context("Container Runtime", func() {
+		ginkgo.Context("Network", func() {
+			ginkgo.It("should recover from ip leak", func() {

 				pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
-				By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
+				ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 				createBatchPodWithRateControl(f, pods, podCreationInterval)
 				defer deletePodsSync(f, pods)

@@ -95,10 +95,10 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 				}

 				for i := 0; i < restartCount; i += 1 {
-					By(fmt.Sprintf("Killing container runtime iteration %d", i))
+					ginkgo.By(fmt.Sprintf("Killing container runtime iteration %d", i))
 					// Wait for container runtime to be running
 					var pid int
-					Eventually(func() error {
+					gomega.Eventually(func() error {
 						runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
 						if err != nil {
 							return err
@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 							return err
 						}
 						return nil
-					}, 1*time.Minute, 2*time.Second).Should(BeNil())
+					}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())
 					if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
 						e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
 					}
@@ -120,18 +120,18 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
 					time.Sleep(20 * time.Second)
 				}

-				By("Checking currently Running/Ready pods")
+				ginkgo.By("Checking currently Running/Ready pods")
 				postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
 				if len(postRestartRunningPods) == 0 {
 					e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
 				}
-				By("Confirm no containers have terminated")
+				ginkgo.By("Confirm no containers have terminated")
 				for _, pod := range postRestartRunningPods {
 					if c := testutils.TerminatedContainers(pod); len(c) != 0 {
 						e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
 					}
 				}
-				By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
+				ginkgo.By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
 			})
 		})
 	})
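For reference, a self-contained sketch of the package-qualified style the commit converts to, including the gomega.Eventually(...).Should(gomega.BeNil()) polling shape used above to wait for the runtime pid. Everything here (package, suite, and spec names, the polled condition) is hypothetical and only illustrates the convention:

package restartstyle_test

import (
	"errors"
	"testing"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestStyle wires Gomega failures into Ginkgo and runs the specs.
func TestStyle(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Named Import Style Suite")
}

var _ = ginkgo.Describe("package-qualified DSL calls", func() {
	ginkgo.It("polls until the condition stops returning an error", func() {
		start := time.Now()
		// Poll every 100ms for up to 5s; Should(gomega.BeNil()) passes once the
		// function returns a nil error, mirroring the restart test's wait loop.
		gomega.Eventually(func() error {
			if time.Since(start) < 300*time.Millisecond {
				return errors.New("not ready yet")
			}
			return nil
		}, 5*time.Second, 100*time.Millisecond).Should(gomega.BeNil())
	})
})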