e2e: use Ginkgo context
All code must use the context from Ginkgo when making API calls or polling for a change; otherwise the code will not return immediately when the test gets aborted.
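For orientation, a minimal sketch of what "using the context from Ginkgo" means, assuming Ginkgo v2 and client-go; the registerSpecs helper and the namespace wiring are illustrative and not part of this commit. Ginkgo passes a context.Context into the spec body and cancels it when the spec is interrupted or times out, so any API call that receives it returns promptly:

// Sketch only (assumption, not from this commit): Ginkgo v2 hands each spec
// body a context.Context that is cancelled on abort or timeout. Threading it
// into client-go calls keeps an aborted suite from hanging on in-flight calls.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerSpecs is a hypothetical helper; call it during spec tree construction.
func registerSpecs(clientset kubernetes.Interface, namespace string) {
	ginkgo.It("lists pods using the spec's context", func(ctx context.Context) {
		// ctx comes from Ginkgo; when the test is aborted it is cancelled,
		// so the List call below returns instead of blocking.
		pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(pods.Items).NotTo(gomega.BeNil())
	})
}

The diff below applies the same pattern to the existing sysctl tests, replacing context.TODO() and context-free helper calls with the spec's ctx.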
@@ -87,27 +87,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
 pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}

 ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
-pod = podClient.Create(pod)
+pod = podClient.Create(ctx, pod)

 ginkgo.By("Watching for error events or started pod")
 // watch for events instead of termination of pod because the kubelet deletes
 // failed pods without running containers. This would create a race as the pod
 // might have already been deleted here.
-ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
+ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
 framework.ExpectNoError(err)
 gomega.Expect(ev).To(gomega.BeNil())

 ginkgo.By("Waiting for pod completion")
-err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
-pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

 ginkgo.By("Checking that the pod succeeded")
 framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)

 ginkgo.By("Getting logs from the pod")
-log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
 framework.ExpectNoError(err)

 ginkgo.By("Checking that the sysctl is actually updated")
@@ -146,7 +146,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {

 ginkgo.By("Creating a pod with one valid and two invalid sysctls")
 client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
-_, err := client.Create(context.TODO(), pod, metav1.CreateOptions{})
+_, err := client.Create(ctx, pod, metav1.CreateOptions{})

 gomega.Expect(err).NotTo(gomega.BeNil())
 gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`))
@@ -168,11 +168,11 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
 }

 ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node")
-pod = podClient.Create(pod)
+pod = podClient.Create(ctx, pod)

 ginkgo.By("Wait for pod failed reason")
 // watch for pod failed reason instead of termination of pod
-err := e2epod.WaitForPodFailedReason(f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
+err := e2epod.WaitForPodFailedReason(ctx, f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
 framework.ExpectNoError(err)
 })

@@ -195,27 +195,27 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
 pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel/shm_rmid_forced"}

 ginkgo.By("Creating a pod with the kernel/shm_rmid_forced sysctl")
-pod = podClient.Create(pod)
+pod = podClient.Create(ctx, pod)

 ginkgo.By("Watching for error events or started pod")
 // watch for events instead of termination of pod because the kubelet deletes
 // failed pods without running containers. This would create a race as the pod
 // might have already been deleted here.
-ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
+ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(ctx, pod)
 framework.ExpectNoError(err)
 gomega.Expect(ev).To(gomega.BeNil())

 ginkgo.By("Waiting for pod completion")
-err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
-pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

 ginkgo.By("Checking that the pod succeeded")
 framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)

 ginkgo.By("Getting logs from the pod")
-log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
 framework.ExpectNoError(err)

 ginkgo.By("Checking that the sysctl is actually updated")