e2e: use Ginkgo context

All code must use the context from Ginkgo when making API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
Patrick Ohly
2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
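
For orientation, here is a minimal sketch of the pattern this commit applies throughout the tree, written against a hypothetical queryServer helper rather than the real e2e framework: the context that Ginkgo passes to the node body is forwarded to gomega.Eventually and to every blocking call, so polling stops as soon as the spec is aborted.

package contextpropagation_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// queryServer stands in for any API call that accepts a context.
func queryServer(ctx context.Context) error {
	select {
	case <-ctx.Done():
		// Return immediately once Ginkgo cancels the context,
		// e.g. when the spec is interrupted or times out.
		return ctx.Err()
	case <-time.After(100 * time.Millisecond):
		return errors.New("not ready yet")
	}
}

var _ = ginkgo.Describe("context propagation", func() {
	// The node body accepts a context.Context; Ginkgo cancels it
	// when the spec is aborted.
	ginkgo.It("polls with the spec context", func(ctx context.Context) {
		gomega.Eventually(ctx, func(ctx context.Context) error {
			return queryServer(ctx)
		}, 30*time.Second, time.Second).Should(gomega.BeNil())
	})
})

func TestContextPropagation(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "context propagation suite")
}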

@@ -172,13 +172,13 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
realPods := getPods(test.testPods)
-e2epod.NewPodClient(f).CreateBatch(realPods)
+e2epod.NewPodClient(f).CreateBatch(ctx, realPods)
ginkgo.By("Making sure all containers restart the specified number of times")
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func(ctx context.Context) error {
for _, podSpec := range test.testPods {
-err := verifyPodRestartCount(f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
+err := verifyPodRestartCount(ctx, f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)
if err != nil {
return err
}
@@ -187,12 +187,12 @@ func containerGCTest(f *framework.Framework, test testRun) {
}, setupDuration, runtimePollInterval).Should(gomega.BeNil())
})
-ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func(ctx context.Context) {
+ginkgo.It("Should eventually garbage collect containers when we exceed the number of dead containers per container", func(ctx context.Context) {
totalContainers := 0
for _, pod := range test.testPods {
totalContainers += pod.numContainers*2 + 1
}
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
total := 0
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
@@ -223,7 +223,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
ginkgo.By("Making sure the kubelet consistently keeps around an extra copy of each container.")
-gomega.Consistently(func() error {
+gomega.Consistently(ctx, func() error {
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
if err != nil {
@@ -246,14 +246,14 @@ func containerGCTest(f *framework.Framework, test testRun) {
}
})
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
for _, pod := range test.testPods {
ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-e2epod.NewPodClient(f).DeleteSync(pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}
ginkgo.By("Making sure all containers get cleaned up")
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
for _, pod := range test.testPods {
containerNames, err := pod.getContainerNames()
if err != nil {
@@ -267,8 +267,8 @@ func containerGCTest(f *framework.Framework, test testRun) {
}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())
if ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {
-logNodeEvents(f)
-logPodEvents(f)
+logNodeEvents(ctx, f)
+logPodEvents(ctx, f)
}
})
})
@@ -317,8 +317,8 @@ func getRestartingContainerCommand(path string, containerNum int, restarts int32
}
}
-func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error {
-updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{})
+func verifyPodRestartCount(ctx context.Context, f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error {
+updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return err
}
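
The final hunk shows the companion change for helper functions: rather than manufacturing a context with context.TODO(), verifyPodRestartCount now accepts ctx as its first parameter and forwards it to the client call. A minimal sketch of the same refactor, using a hypothetical waitForReady helper and resourceGetter interface:

package contextpropagation

import "context"

// resourceGetter is a hypothetical client interface, standing in for the
// typed clients used by the e2e framework.
type resourceGetter interface {
	Get(ctx context.Context, name string) (any, error)
}

// Before the refactor the helper created its own context, so it could not
// be canceled when the spec was aborted:
//
//	func waitForReady(client resourceGetter, name string) error {
//		_, err := client.Get(context.TODO(), name)
//		return err
//	}

// After the refactor ctx comes first and is passed straight through,
// mirroring the verifyPodRestartCount change in this commit.
func waitForReady(ctx context.Context, client resourceGetter, name string) error {
	_, err := client.Get(ctx, name)
	return err
}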