e2e: use Ginkgo context

All code must use the context from Ginkgo when making API calls or polling for a change;
otherwise the code will not return promptly when the test gets aborted.
Author: Patrick Ohly
Date: 2022-12-12 10:11:10 +01:00
Parent: bf1d1dfd0f
Commit: 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
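For orientation, here is a minimal sketch of the pattern this commit applies throughout the suite. It is hypothetical, not code from the commit: the spec text, the framework name "ctx-example", and the pod name "example-pod" are made up. The idea is that Ginkgo hands a context.Context to the spec body, and that context is forwarded to client-go calls and to gomega.Eventually instead of context.TODO(), so polling and API calls are cancelled as soon as the spec is interrupted.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ginkgo context example", func() {
	f := framework.NewDefaultFramework("ctx-example")

	ginkgo.It("waits for a pod to run", func(ctx context.Context) {
		// The Ginkgo-provided ctx is forwarded to the API call instead of context.TODO().
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, "example-pod", metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Passing ctx to Eventually (and into the polled function) makes the poll
		// stop as soon as the spec is aborted instead of running to its timeout.
		gomega.Eventually(ctx, func(ctx context.Context) (v1.PodPhase, error) {
			p, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
			if err != nil {
				return "", err
			}
			return p.Status.Phase, nil
		}, time.Minute, framework.Poll).Should(gomega.Equal(v1.PodRunning))
	})
})

The diff below shows the same transformation applied to the Memory Manager node e2e test; lines beginning with "-" are removed, lines beginning with "+" are added.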

@@ -271,10 +271,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
evictionHard: map[string]string{evictionHardMemory: "100Mi"},
}
-verifyMemoryPinning := func(pod *v1.Pod, numaNodeIDs []int) {
+verifyMemoryPinning := func(ctx context.Context, pod *v1.Pod, numaNodeIDs []int) {
ginkgo.By("Verifying the NUMA pinning")
-output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+output, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
currentNUMANodeIDs, err := cpuset.Parse(strings.Trim(output, "\n"))
@@ -283,9 +283,9 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.ToSlice())
}
-waitingForHugepages := func(hugepagesCount int) {
-gomega.Eventually(func() error {
-node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+waitingForHugepages := func(ctx context.Context, hugepagesCount int) {
+gomega.Eventually(ctx, func(ctx context.Context) error {
+node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -309,7 +309,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}, time.Minute, framework.Poll).Should(gomega.BeNil())
}
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
if isMultiNUMASupported == nil {
isMultiNUMASupported = pointer.BoolPtr(isMultiNUMA())
}
@@ -325,18 +325,18 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
// allocate hugepages
if *is2MiHugepagesSupported {
ginkgo.By("Configuring hugepages")
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
return configureHugePages(hugepagesSize2M, hugepages2MiCount, pointer.IntPtr(0))
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
}
})
// dynamically update the kubelet configuration
-ginkgo.JustBeforeEach(func() {
+ginkgo.JustBeforeEach(func(ctx context.Context) {
// allocate hugepages
if *is2MiHugepagesSupported {
ginkgo.By("Waiting for hugepages resource to become available on the local node")
-waitingForHugepages(hugepages2MiCount)
+waitingForHugepages(ctx, hugepages2MiCount)
for i := 0; i < len(ctnParams); i++ {
ctnParams[i].hugepages2Mi = "8Mi"
@@ -348,16 +348,16 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}
})
-ginkgo.JustAfterEach(func() {
+ginkgo.JustAfterEach(func(ctx context.Context) {
// delete the test pod
if testPod != nil && testPod.Name != "" {
-e2epod.NewPodClient(f).DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
// release hugepages
if *is2MiHugepagesSupported {
ginkgo.By("Releasing allocated hugepages")
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
// configure hugepages on the NUMA node 0 to avoid hugepages split across NUMA nodes
return configureHugePages(hugepagesSize2M, 0, pointer.IntPtr(0))
}, 90*time.Second, 15*time.Second).ShouldNot(gomega.HaveOccurred(), "failed to release hugepages")
@@ -365,7 +365,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
})
ginkgo.Context("with static policy", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
kubeParams := *defaultKubeParams
kubeParams.memoryManagerPolicy = staticPolicy
updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams)
@@ -386,7 +386,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
framework.ExpectNoError(err)
defer conn.Close()
-resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoError(err)
gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
@@ -441,14 +441,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
ginkgo.By("Running the test pod")
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
// there is no point in verifying NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
return
}
-verifyMemoryPinning(testPod, []int{0})
+verifyMemoryPinning(ctx, testPod, []int{0})
})
})
@@ -466,14 +466,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
ginkgo.By("Running the test pod")
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
// there is no point in verifying NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
return
}
-verifyMemoryPinning(testPod, []int{0})
+verifyMemoryPinning(ctx, testPod, []int{0})
})
})
@@ -497,27 +497,27 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.It("should succeed to start all pods", func(ctx context.Context) {
ginkgo.By("Running the test pod and the test pod 2")
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
ginkgo.By("Running the test pod 2")
-testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2)
+testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2)
// there is no point in verifying NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
return
}
-verifyMemoryPinning(testPod, []int{0})
-verifyMemoryPinning(testPod2, []int{0})
+verifyMemoryPinning(ctx, testPod, []int{0})
+verifyMemoryPinning(ctx, testPod2, []int{0})
})
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func(ctx context.Context) {
ginkgo.By("Running the test pod and the test pod 2")
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
ginkgo.By("Running the test pod 2")
-testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2)
+testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2)
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -526,7 +526,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
framework.ExpectNoError(err)
defer conn.Close()
-resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)
for _, pod := range []*v1.Pod{testPod, testPod2} {
@@ -553,10 +553,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}
})
-ginkgo.JustAfterEach(func() {
+ginkgo.JustAfterEach(func(ctx context.Context) {
// delete the test pod 2
if testPod2.Name != "" {
-e2epod.NewPodClient(f).DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
})
})
@@ -582,7 +582,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}
})
-ginkgo.JustBeforeEach(func() {
+ginkgo.JustBeforeEach(func(ctx context.Context) {
stateData, err := getMemoryManagerState()
framework.ExpectNoError(err)
@@ -599,18 +599,18 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}
workloadPod := makeMemoryManagerPod(workloadCtnAttrs[0].ctnName, initCtnParams, workloadCtnAttrs)
-workloadPod = e2epod.NewPodClient(f).CreateSync(workloadPod)
+workloadPod = e2epod.NewPodClient(f).CreateSync(ctx, workloadPod)
workloadPods = append(workloadPods, workloadPod)
}
})
ginkgo.It("should be rejected", func(ctx context.Context) {
ginkgo.By("Creating the pod")
-testPod = e2epod.NewPodClient(f).Create(testPod)
+testPod = e2epod.NewPodClient(f).Create(ctx, testPod)
ginkgo.By("Checking that pod failed to start because of admission error")
-gomega.Eventually(func() bool {
-tmpPod, err := e2epod.NewPodClient(f).Get(context.TODO(), testPod.Name, metav1.GetOptions{})
+gomega.Eventually(ctx, func() bool {
+tmpPod, err := e2epod.NewPodClient(f).Get(ctx, testPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
if tmpPod.Status.Phase != v1.PodFailed {
@@ -632,10 +632,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
)
})
-ginkgo.JustAfterEach(func() {
+ginkgo.JustAfterEach(func(ctx context.Context) {
for _, workloadPod := range workloadPods {
if workloadPod.Name != "" {
-e2epod.NewPodClient(f).DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
}
})
@@ -643,7 +643,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
})
ginkgo.Context("with none policy", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
kubeParams := *defaultKubeParams
kubeParams.memoryManagerPolicy = nonePolicy
updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams)
@@ -671,7 +671,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
framework.ExpectNoError(err)
defer conn.Close()
-resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoError(err)
gomega.Expect(resp.Memory).To(gomega.BeEmpty())
@@ -679,7 +679,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should not report any memory data during request to pod resources List", func(ctx context.Context) {
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -688,7 +688,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
framework.ExpectNoError(err)
defer conn.Close()
-resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)
for _, podResource := range resp.PodResources {
@@ -703,14 +703,14 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
})
ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
-testPod = e2epod.NewPodClient(f).CreateSync(testPod)
+testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
// there is no point in verifying NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
return
}
-verifyMemoryPinning(testPod, allNUMANodes)
+verifyMemoryPinning(ctx, testPod, allNUMANodes)
})
})
})