e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
Author: Patrick Ohly
Date:   2022-12-12 10:11:10 +01:00
Parent: bf1d1dfd0f
Commit: 2f6c4f5eab

418 changed files with 11489 additions and 11369 deletions
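
The change applied across the diff is mechanical: Ginkgo node functions (ginkgo.BeforeEach, ginkgo.AfterEach, ginkgo.It) take a context.Context parameter, that context is passed as the first argument to gomega.Eventually and threaded into every framework helper, and context.TODO() call sites disappear. Below is a minimal sketch of the resulting shape, assuming Ginkgo v2 and a Gomega version with context support; listPods is a hypothetical helper standing in for any context-aware API call, not a function from this commit.

package e2e

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// listPods is a hypothetical context-aware helper standing in for any
// framework call that talks to the API server.
func listPods(ctx context.Context) ([]string, error) {
	return []string{"pod-0"}, ctx.Err()
}

var _ = ginkgo.It("waits for pods", func(ctx context.Context) {
	// Ginkgo cancels ctx when the spec is aborted or times out. Passing it
	// both to Eventually and through to the polled closure means the poll
	// loop and the API call inside it stop immediately, instead of running
	// until the one-minute timeout expires.
	gomega.Eventually(ctx, func(ctx context.Context) bool {
		pods, err := listPods(ctx)
		return err == nil && len(pods) > 0
	}, time.Minute, time.Second).Should(gomega.BeTrue())
})

A few conversions in this diff (for example the wait after plugin re-registration) keep the func() bool form and simply capture the outer ctx in the closure; since Eventually also receives ctx, cancellation still stops the poll loop immediately.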

@@ -97,18 +97,18 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		var v1alphaPodResources *kubeletpodresourcesv1alpha1.ListPodResourcesResponse
 		var v1PodResources *kubeletpodresourcesv1.ListPodResourcesResponse
 		var err error
-		ginkgo.BeforeEach(func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
 			ginkgo.By("Wait for node to be ready")
-			gomega.Eventually(func() bool {
-				nodes, err := e2enode.TotalReady(f.ClientSet)
+			gomega.Eventually(ctx, func(ctx context.Context) bool {
+				nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 				framework.ExpectNoError(err)
 				return nodes == 1
 			}, time.Minute, time.Second).Should(gomega.BeTrue())

-			v1alphaPodResources, err = getV1alpha1NodeDevices()
+			v1alphaPodResources, err = getV1alpha1NodeDevices(ctx)
 			framework.ExpectNoError(err, "should get node local podresources by accessing the (v1alpha) podresources API endpoint")

-			v1PodResources, err = getV1NodeDevices()
+			v1PodResources, err = getV1NodeDevices(ctx)
 			framework.ExpectNoError(err, "should get node local podresources by accessing the (v1) podresources API endpoint")

 			// Before we run the device plugin test, we need to ensure
@@ -137,30 +137,30 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 				}
 			}

 			dptemplate = dp.DeepCopy()
-			devicePluginPod = e2epod.NewPodClient(f).CreateSync(dp)
+			devicePluginPod = e2epod.NewPodClient(f).CreateSync(ctx, dp)

 			ginkgo.By("Waiting for devices to become available on the local node")
-			gomega.Eventually(func() bool {
-				node, ready := getLocalTestNode(f)
+			gomega.Eventually(ctx, func(ctx context.Context) bool {
+				node, ready := getLocalTestNode(ctx, f)
 				return ready && numberOfSampleResources(node) > 0
 			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
 			framework.Logf("Successfully created device plugin pod")

 			ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
-			gomega.Eventually(func() bool {
-				node, ready := getLocalTestNode(f)
+			gomega.Eventually(ctx, func(ctx context.Context) bool {
+				node, ready := getLocalTestNode(ctx, f)
 				return ready &&
 					numberOfDevicesCapacity(node, resourceName) == devsLen &&
 					numberOfDevicesAllocatable(node, resourceName) == devsLen
 			}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
 		})

-		ginkgo.AfterEach(func() {
+		ginkgo.AfterEach(func(ctx context.Context) {
 			ginkgo.By("Deleting the device plugin pod")
-			e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)

 			ginkgo.By("Deleting any Pods created by the test")
-			l, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{})
+			l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			for _, p := range l.Items {
 				if p.Namespace != f.Namespace.Name {
@@ -168,14 +168,14 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 				}
 				framework.Logf("Deleting pod: %s", p.Name)
-				e2epod.NewPodClient(f).DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
 			}
 			restartKubelet(true)

 			ginkgo.By("Waiting for devices to become unavailable on the local node")
-			gomega.Eventually(func() bool {
-				node, ready := getLocalTestNode(f)
+			gomega.Eventually(ctx, func(ctx context.Context) bool {
+				node, ready := getLocalTestNode(ctx, f)
 				return ready && numberOfSampleResources(node) <= 0
 			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
@@ -184,15 +184,15 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		ginkgo.It("Can schedule a pod that requires a device", func(ctx context.Context) {
 			podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
-			pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
+			pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD))

 			deviceIDRE := "stub devices: (Dev-[0-9]+)"
-			devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			devID1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

-			v1alphaPodResources, err = getV1alpha1NodeDevices()
+			v1alphaPodResources, err = getV1alpha1NodeDevices(ctx)
 			framework.ExpectNoError(err)

-			v1PodResources, err = getV1NodeDevices()
+			v1PodResources, err = getV1NodeDevices(ctx)
 			framework.ExpectNoError(err)

 			framework.Logf("v1alphaPodResources.PodResources:%+v\n", v1alphaPodResources.PodResources)
@@ -244,78 +244,78 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func(ctx context.Context) {
 			podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
-			pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
+			pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD))
 			deviceIDRE := "stub devices: (Dev-[0-9]+)"
-			devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			devID1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

-			pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{})
+			pod1, err := e2epod.NewPodClient(f).Get(ctx, pod1.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)

-			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
+			ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name)

 			ginkgo.By("Confirming that device assignment persists even after container restart")
-			devIDAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			devIDAfterRestart := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			framework.ExpectEqual(devIDAfterRestart, devID1)

 			ginkgo.By("Restarting Kubelet")
 			restartKubelet(true)

 			ginkgo.By("Wait for node to be ready again")
-			e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)
+			e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 5*time.Minute)

 			ginkgo.By("Validating that assignment is kept")
-			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
+			ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name)
 			ginkgo.By("Confirming that after a kubelet restart, fake-device assignment is kept")
-			devIDRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			devIDRestart1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			framework.ExpectEqual(devIDRestart1, devID1)
 		})

 		ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func(ctx context.Context) {
 			podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
-			pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
+			pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD))
 			deviceIDRE := "stub devices: (Dev-[0-9]+)"
-			devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			devID1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

-			pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{})
+			pod1, err := e2epod.NewPodClient(f).Get(ctx, pod1.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)

 			ginkgo.By("Restarting Kubelet")
 			restartKubelet(true)

 			ginkgo.By("Wait for node to be ready again")
-			e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)
+			e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, 5*time.Minute)

 			ginkgo.By("Re-Register resources and delete the plugin pod")
 			gp := int64(0)
 			deleteOptions := metav1.DeleteOptions{
 				GracePeriodSeconds: &gp,
 			}
-			e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute)
-			waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute)
+			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)

 			ginkgo.By("Recreating the plugin pod")
-			devicePluginPod = e2epod.NewPodClient(f).CreateSync(dptemplate)
+			devicePluginPod = e2epod.NewPodClient(f).CreateSync(ctx, dptemplate)

 			ginkgo.By("Confirming that after a kubelet and pod restart, fake-device assignment is kept")
-			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
-			devIDRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
+			ensurePodContainerRestart(ctx, f, pod1.Name, pod1.Name)
+			devIDRestart1 := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
 			framework.ExpectEqual(devIDRestart1, devID1)

 			ginkgo.By("Waiting for resource to become available on the local node after re-registration")
-			gomega.Eventually(func() bool {
-				node, ready := getLocalTestNode(f)
+			gomega.Eventually(ctx, func() bool {
+				node, ready := getLocalTestNode(ctx, f)
 				return ready &&
 					numberOfDevicesCapacity(node, resourceName) == devsLen &&
 					numberOfDevicesAllocatable(node, resourceName) == devsLen
 			}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())

 			ginkgo.By("Creating another pod")
-			pod2 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
+			pod2 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(resourceName, podRECMD))

 			ginkgo.By("Checking that pod got a different fake device")
-			devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
+			devID2 := parseLog(ctx, f, pod2.Name, pod2.Name, deviceIDRE)

 			gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 		})
@@ -347,16 +347,16 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
 }

 // ensurePodContainerRestart confirms that pod container has restarted at least once
-func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {
+func ensurePodContainerRestart(ctx context.Context, f *framework.Framework, podName string, contName string) {
 	var initialCount int32
 	var currentCount int32
-	p, err := e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
+	p, err := e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
 	if err != nil || len(p.Status.ContainerStatuses) < 1 {
 		framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
 	}
 	initialCount = p.Status.ContainerStatuses[0].RestartCount
-	gomega.Eventually(func() bool {
-		p, err = e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
+	gomega.Eventually(ctx, func() bool {
+		p, err = e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil || len(p.Status.ContainerStatuses) < 1 {
 			return false
 		}
@@ -367,8 +367,8 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
 }

 // parseLog returns the matching string for the specified regular expression parsed from the container logs.
-func parseLog(f *framework.Framework, podName string, contName string, re string) string {
-	logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
+func parseLog(ctx context.Context, f *framework.Framework, podName string, contName string, re string) string {
+	logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, contName)
 	if err != nil {
 		framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
 	}