e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
Patrick Ohly
2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
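
For illustration, the pattern this commit applies looks roughly like the sketch below. It is not code from the commit itself: the package name, suite description, and framework base name are placeholders, while the gomega.Eventually / e2enode.TotalReady usage mirrors the diff that follows.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = ginkgo.Describe("ginkgo context plumbing (illustrative)", func() {
	f := framework.NewDefaultFramework("ctx-example")

	ginkgo.It("forwards the spec context to API calls and polling", func(ctx context.Context) {
		// ctx is supplied by Ginkgo and is cancelled when the spec is aborted
		// (interrupt or timeout). Passing it to Eventually stops the polling
		// loop promptly, and passing it on to the API helper cancels the
		// in-flight request as well.
		gomega.Eventually(ctx, func(ctx context.Context) bool {
			nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
			framework.ExpectNoError(err)
			return nodes == 1
		}, time.Minute, time.Second).Should(gomega.BeTrue())
	})
})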


@@ -48,14 +48,14 @@ func makeNodePerfPod(w workloads.NodePerfWorkload) *v1.Pod {
 	}
 }
-func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) {
+func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration) {
 	if cfg != nil {
 		// Update the Kubelet configuration.
 		ginkgo.By("Stopping the kubelet")
 		startKubelet := stopKubelet()
 		// wait until the kubelet health check will fail
-		gomega.Eventually(func() bool {
+		gomega.Eventually(ctx, func() bool {
 			return kubeletHealthCheck(kubeletHealthCheckURL)
 		}, time.Minute, time.Second).Should(gomega.BeFalse())
@@ -65,14 +65,14 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
 		startKubelet()
 		// wait until the kubelet health check will succeed
-		gomega.Eventually(func() bool {
+		gomega.Eventually(ctx, func() bool {
 			return kubeletHealthCheck(kubeletHealthCheckURL)
 		}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
 	}
 	// Wait for the Kubelet to be ready.
-	gomega.Eventually(func() bool {
-		nodes, err := e2enode.TotalReady(f.ClientSet)
+	gomega.Eventually(ctx, func(ctx context.Context) bool {
+		nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		return nodes == 1
 	}, time.Minute, time.Second).Should(gomega.BeTrue())
@@ -89,22 +89,22 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		newCfg *kubeletconfig.KubeletConfiguration
 		pod    *v1.Pod
 	)
-	ginkgo.JustBeforeEach(func() {
+	ginkgo.JustBeforeEach(func(ctx context.Context) {
 		err := wl.PreTestExec()
 		framework.ExpectNoError(err)
-		oldCfg, err = getCurrentKubeletConfig()
+		oldCfg, err = getCurrentKubeletConfig(ctx)
 		framework.ExpectNoError(err)
 		newCfg, err = wl.KubeletConfig(oldCfg)
 		framework.ExpectNoError(err)
-		setKubeletConfig(f, newCfg)
+		setKubeletConfig(ctx, f, newCfg)
 	})
-	cleanup := func() {
+	cleanup := func(ctx context.Context) {
 		gp := int64(0)
 		delOpts := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		e2epod.NewPodClient(f).DeleteSync(pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout)
 		// We are going to give some more time for the CPU manager to do any clean
 		// up it needs to do now that the pod has been deleted. Otherwise we may
@@ -117,18 +117,18 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		ginkgo.By("running the post test exec from the workload")
 		err := wl.PostTestExec()
 		framework.ExpectNoError(err)
-		setKubeletConfig(f, oldCfg)
+		setKubeletConfig(ctx, f, oldCfg)
 	}
-	runWorkload := func() {
+	runWorkload := func(ctx context.Context) {
 		ginkgo.By("running the workload and waiting for success")
 		// Make the pod for the workload.
 		pod = makeNodePerfPod(wl)
 		// Create the pod.
-		pod = e2epod.NewPodClient(f).CreateSync(pod)
+		pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
 		// Wait for pod success.
 		// but avoid using WaitForSuccess because we want the container logs upon failure #109295
-		podErr := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(),
+		podErr := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(),
 			func(pod *v1.Pod) (bool, error) {
 				switch pod.Status.Phase {
 				case v1.PodFailed:
@@ -140,7 +140,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 				}
 			},
 		)
-		podLogs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
 		framework.ExpectNoError(err)
 		if podErr != nil {
 			framework.Logf("dumping pod logs due to pod error detected: \n%s", podLogs)
@@ -153,11 +153,11 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		gomega.Expect(podErr).To(gomega.Succeed(), "wait for pod %q to succeed", pod.Name)
 	}
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		ginkgo.By("ensure environment has enough CPU + Memory to run")
 		minimumRequiredCPU := resource.MustParse("15")
 		minimumRequiredMemory := resource.MustParse("48Gi")
-		localNodeCap := getLocalNode(f).Status.Allocatable
+		localNodeCap := getLocalNode(ctx, f).Status.Allocatable
 		cpuCap := localNodeCap[v1.ResourceCPU]
 		memCap := localNodeCap[v1.ResourceMemory]
 		if cpuCap.Cmp(minimumRequiredCPU) == -1 {
@@ -174,7 +174,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		})
 		ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func(ctx context.Context) {
 			ginkgo.DeferCleanup(cleanup)
-			runWorkload()
+			runWorkload(ctx)
 		})
 	})
 	ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
@@ -183,7 +183,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		})
 		ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func(ctx context.Context) {
 			ginkgo.DeferCleanup(cleanup)
-			runWorkload()
+			runWorkload(ctx)
 		})
 	})
 	ginkgo.Context("Run node performance testing with pre-defined workloads", func() {
@@ -192,7 +192,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		})
 		ginkgo.It("TensorFlow workload", func(ctx context.Context) {
 			ginkgo.DeferCleanup(cleanup)
-			runWorkload()
+			runWorkload(ctx)
 		})
 	})
 })
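
One detail worth noting from the diff above: cleanup now takes a context.Context, yet it is still registered with plain ginkgo.DeferCleanup(cleanup). That works because Ginkgo v2 passes the cleanup node's context to any DeferCleanup callback whose first parameter is a context.Context (or ginkgo.SpecContext). A minimal sketch, with a hypothetical deleteTestPod helper standing in for the pod deletion above:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// deleteTestPod is a hypothetical stand-in for a ctx-aware cleanup routine
// such as the pod deletion in the diff above.
func deleteTestPod(ctx context.Context) {
	// ... issue ctx-aware API calls to delete the test pod ...
}

var _ = ginkgo.Describe("ctx-aware cleanup (illustrative)", func() {
	ginkgo.It("registers a cleanup that receives a context", func(ctx context.Context) {
		// Ginkgo detects the context.Context parameter on deleteTestPod and
		// supplies the cleanup node's context when the spec finishes or is
		// interrupted, so the cleanup itself is cancellable too.
		ginkgo.DeferCleanup(deleteTestPod)
	})
})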