e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change; otherwise the code would not return immediately when the test gets aborted.
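For illustration only (not part of the diff below), here is a minimal sketch of the pattern this commit applies: the spec receives a context from Ginkgo and threads it through both the API call and the poll, so both stop as soon as the spec is interrupted or times out. It assumes Ginkgo v2 context-aware specs and a Gomega version whose Eventually accepts a context; the helper name, client, namespace, and pod name are placeholders.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// registerPodRunningSpec is a hypothetical helper, used only to illustrate the pattern.
func registerPodRunningSpec(cs kubernetes.Interface, ns, podName string) {
	// The spec body accepts a context.Context; Ginkgo cancels it when the
	// spec is interrupted, aborted, or times out.
	ginkgo.It("waits for the pod using the spec context", func(ctx context.Context) {
		// Pass ctx both to Eventually and to the polled function so the
		// polling loop and the API call stop as soon as ctx is cancelled.
		gomega.Eventually(ctx, func(ctx context.Context) (v1.PodPhase, error) {
			pod, err := cs.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				return "", err
			}
			return pod.Status.Phase, nil
		}).WithTimeout(2 * time.Minute).WithPolling(time.Second).Should(gomega.Equal(v1.PodRunning))
	})
}

The same pattern appears throughout the diff below, e.g. gomega.Eventually(ctx, ...) in waitForAllContainerRemoval and waitForSRIOVResources, and the extra ctx argument threaded through the helper functions.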
@@ -290,11 +290,11 @@ func findSRIOVResource(node *v1.Node) (string, int64) {
 	return "", 0
 }

-func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) {
+func validatePodAlignment(ctx context.Context, f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) {
 	for _, cnt := range pod.Spec.Containers {
 		ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name))

-		logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name)
+		logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name)
 		framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", cnt.Name, pod.Name)

 		framework.Logf("got pod logs: %v", logs)
@@ -307,13 +307,13 @@ func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvI
 }

 // validatePodAligmentWithPodScope validates whether all pod's CPUs are affined to the same NUMA node.
-func validatePodAlignmentWithPodScope(f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) error {
+func validatePodAlignmentWithPodScope(ctx context.Context, f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) error {
 	// Mapping between CPU IDs and NUMA node IDs.
 	podsNUMA := make(map[int]int)

 	ginkgo.By(fmt.Sprintf("validate pod scope alignment for %s pod", pod.Name))
 	for _, cnt := range pod.Spec.Containers {
-		logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name)
+		logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cnt.Name)
 		framework.ExpectNoError(err, "NUMA alignment failed for container [%s] of pod [%s]", cnt.Name, pod.Name)
 		envMap, err := makeEnvMap(logs)
 		framework.ExpectNoError(err, "NUMA alignment failed for container [%s] of pod [%s]", cnt.Name, pod.Name)
@@ -336,10 +336,10 @@ func validatePodAlignmentWithPodScope(f *framework.Framework, pod *v1.Pod, envIn
 	return nil
 }

-func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
+func runTopologyManagerPolicySuiteTests(ctx context.Context, f *framework.Framework) {
 	var cpuCap, cpuAlloc int64

-	cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
+	cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
 	ginkgo.By(fmt.Sprintf("checking node CPU capacity (%d) and allocatable CPUs (%d)", cpuCap, cpuAlloc))

 	// Albeit even the weakest CI machines usually have 2 cpus, let's be extra careful and
@@ -349,10 +349,10 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
 	}

 	ginkgo.By("running a non-Gu pod")
-	runNonGuPodTest(f, cpuCap)
+	runNonGuPodTest(ctx, f, cpuCap)

 	ginkgo.By("running a Gu pod")
-	runGuPodTest(f, 1)
+	runGuPodTest(ctx, f, 1)

 	// Skip rest of the tests if CPU allocatable < 3.
 	if cpuAlloc < 3 {
@@ -360,16 +360,16 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
 	}

 	ginkgo.By("running multiple Gu and non-Gu pods")
-	runMultipleGuNonGuPods(f, cpuCap, cpuAlloc)
+	runMultipleGuNonGuPods(ctx, f, cpuCap, cpuAlloc)

 	ginkgo.By("running a Gu pod requesting multiple CPUs")
-	runMultipleCPUGuPod(f)
+	runMultipleCPUGuPod(ctx, f)

 	ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
-	runMultipleCPUContainersGuPod(f)
+	runMultipleCPUContainersGuPod(ctx, f)

 	ginkgo.By("running multiple Gu pods")
-	runMultipleGuPods(f)
+	runMultipleGuPods(ctx, f)
 }

 // waitForAllContainerRemoval waits until all the containers on a given pod are really gone.
@@ -377,11 +377,11 @@ func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
 // In these cases, we need to make sure the tests clean up after themselves to make sure each test runs in
 // a pristine environment. The only way known so far to do that is to introduce this wait.
 // Worth noting, however, that this makes the test runtime much bigger.
-func waitForAllContainerRemoval(podName, podNS string) {
+func waitForAllContainerRemoval(ctx context.Context, podName, podNS string) {
 	rs, _, err := getCRIClient()
 	framework.ExpectNoError(err)
-	gomega.Eventually(func() bool {
-		containers, err := rs.ListContainers(context.Background(), &runtimeapi.ContainerFilter{
+	gomega.Eventually(ctx, func(ctx context.Context) bool {
+		containers, err := rs.ListContainers(ctx, &runtimeapi.ContainerFilter{
 			LabelSelector: map[string]string{
 				types.KubernetesPodNameLabel: podName,
 				types.KubernetesPodNamespaceLabel: podNS,
@@ -394,14 +394,14 @@ func waitForAllContainerRemoval(podName, podNS string) {
 	}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
 }

-func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
+func runTopologyManagerPositiveTest(ctx context.Context, f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
 	podMap := make(map[string]*v1.Pod)

 	for podID := 0; podID < numPods; podID++ {
 		podName := fmt.Sprintf("gu-pod-%d", podID)
 		framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
 		pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
-		pod = e2epod.NewPodClient(f).CreateSync(pod)
+		pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
 		framework.Logf("created pod %s", podName)
 		podMap[podName] = pod
 	}
@@ -410,20 +410,20 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 	// we can do a menaingful validation only when using the single-numa node policy
 	if envInfo.policy == topologymanager.PolicySingleNumaNode {
 		for _, pod := range podMap {
-			validatePodAlignment(f, pod, envInfo)
+			validatePodAlignment(ctx, f, pod, envInfo)
 		}
 		if envInfo.scope == podScopeTopology {
 			for _, pod := range podMap {
-				err := validatePodAlignmentWithPodScope(f, pod, envInfo)
+				err := validatePodAlignmentWithPodScope(ctx, f, pod, envInfo)
 				framework.ExpectNoError(err)
 			}
 		}
 	}

-	deletePodsAsync(f, podMap)
+	deletePodsAsync(ctx, f, podMap)
 }

-func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+func deletePodsAsync(ctx context.Context, f *framework.Framework, podMap map[string]*v1.Pod) {
 	var wg sync.WaitGroup
 	for _, pod := range podMap {
 		wg.Add(1)
@@ -431,27 +431,27 @@ func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()

-			deletePodSyncByName(f, podName)
-			waitForAllContainerRemoval(podName, podNS)
+			deletePodSyncByName(ctx, f, podName)
+			waitForAllContainerRemoval(ctx, podName, podNS)
 		}(pod.Namespace, pod.Name)
 	}
 	wg.Wait()
 }

-func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
+func runTopologyManagerNegativeTest(ctx context.Context, f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
 	podName := "gu-pod"
 	framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
 	pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)

-	pod = e2epod.NewPodClient(f).Create(pod)
-	err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) {
+	pod = e2epod.NewPodClient(f).Create(ctx, pod)
+	err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) {
 		if pod.Status.Phase != v1.PodPending {
 			return true, nil
 		}
 		return false, nil
 	})
 	framework.ExpectNoError(err)
-	pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)

 	if pod.Status.Phase != v1.PodFailed {
@@ -461,7 +461,7 @@ func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt
 		framework.Failf("pod %s failed for wrong reason: %q", pod.Name, pod.Status.Reason)
 	}

-	deletePodSyncByName(f, pod.Name)
+	deletePodSyncByName(ctx, f, pod.Name)
 }

 func isTopologyAffinityError(pod *v1.Pod) bool {
@@ -498,20 +498,20 @@ type sriovData struct {
 	resourceAmount int64
 }

-func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sriovData {
-	sd := createSRIOVConfigOrFail(f, configMap)
+func setupSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap) *sriovData {
+	sd := createSRIOVConfigOrFail(ctx, f, configMap)

-	e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
+	e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)

-	sd.pod = createSRIOVPodOrFail(f)
+	sd.pod = createSRIOVPodOrFail(ctx, f)
 	return sd
 }

-func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sriovData {
+func createSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap) *sriovData {
 	var err error

 	ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", metav1.NamespaceSystem, configMap.Name))
-	if _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
+	if _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
 		framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 	}

@@ -521,7 +521,7 @@ func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *s
 	}
 	serviceAccount := readServiceAccountV1OrDie(data)
 	ginkgo.By(fmt.Sprintf("Creating serviceAccount %v/%v", metav1.NamespaceSystem, serviceAccount.Name))
-	if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}); err != nil {
+	if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(ctx, serviceAccount, metav1.CreateOptions{}); err != nil {
 		framework.Failf("unable to create test serviceAccount %s: %v", serviceAccount.Name, err)
 	}

@@ -531,15 +531,15 @@ func createSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *s
 	}
 }

-func createSRIOVPodOrFail(f *framework.Framework) *v1.Pod {
+func createSRIOVPodOrFail(ctx context.Context, f *framework.Framework) *v1.Pod {
 	dp := getSRIOVDevicePluginPod()
 	dp.Spec.NodeName = framework.TestContext.NodeName

 	ginkgo.By("Create SRIOV device plugin pod")
-	dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{})
+	dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, dp, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

-	if err = e2epod.WaitForPodCondition(f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil {
+	if err = e2epod.WaitForPodCondition(ctx, f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil {
 		framework.Logf("SRIOV Pod %v took too long to enter running/ready: %v", dp.Name, err)
 	}
 	framework.ExpectNoError(err)
@@ -549,12 +549,12 @@ func createSRIOVPodOrFail(f *framework.Framework) *v1.Pod {

 // waitForSRIOVResources waits until enough SRIOV resources are avaailable, expecting to complete within the timeout.
 // if exits successfully, updates the sriovData with the resources which were found.
-func waitForSRIOVResources(f *framework.Framework, sd *sriovData) {
+func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *sriovData) {
 	sriovResourceName := ""
 	var sriovResourceAmount int64
 	ginkgo.By("Waiting for devices to become available on the local node")
-	gomega.Eventually(func() bool {
-		node := getLocalNode(f)
+	gomega.Eventually(ctx, func(ctx context.Context) bool {
+		node := getLocalNode(ctx, f)
 		sriovResourceName, sriovResourceAmount = findSRIOVResource(node)
 		return sriovResourceAmount > minSriovResource
 	}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
@@ -564,7 +564,7 @@ func waitForSRIOVResources(f *framework.Framework, sd *sriovData) {
 	framework.Logf("Detected SRIOV allocatable devices name=%q amount=%d", sd.resourceName, sd.resourceAmount)
 }

-func deleteSRIOVPodOrFail(f *framework.Framework, sd *sriovData) {
+func deleteSRIOVPodOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) {
 	var err error
 	gp := int64(0)
 	deleteOptions := metav1.DeleteOptions{
@@ -572,12 +572,12 @@ func deleteSRIOVPodOrFail(f *framework.Framework, sd *sriovData) {
 	}

 	ginkgo.By(fmt.Sprintf("Delete SRIOV device plugin pod %s/%s", sd.pod.Namespace, sd.pod.Name))
-	err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, deleteOptions)
+	err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(ctx, sd.pod.Name, deleteOptions)
 	framework.ExpectNoError(err)
-	waitForAllContainerRemoval(sd.pod.Name, sd.pod.Namespace)
+	waitForAllContainerRemoval(ctx, sd.pod.Name, sd.pod.Namespace)
 }

-func removeSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
+func removeSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) {
 	var err error
 	gp := int64(0)
 	deleteOptions := metav1.DeleteOptions{
@@ -585,25 +585,25 @@ func removeSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
 	}

 	ginkgo.By(fmt.Sprintf("Deleting configMap %v/%v", metav1.NamespaceSystem, sd.configMap.Name))
-	err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, deleteOptions)
+	err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(ctx, sd.configMap.Name, deleteOptions)
 	framework.ExpectNoError(err)

 	ginkgo.By(fmt.Sprintf("Deleting serviceAccount %v/%v", metav1.NamespaceSystem, sd.serviceAccount.Name))
-	err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, deleteOptions)
+	err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(ctx, sd.serviceAccount.Name, deleteOptions)
 	framework.ExpectNoError(err)
 }

-func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
-	deleteSRIOVPodOrFail(f, sd)
-	removeSRIOVConfigOrFail(f, sd)
+func teardownSRIOVConfigOrFail(ctx context.Context, f *framework.Framework, sd *sriovData) {
+	deleteSRIOVPodOrFail(ctx, f, sd)
+	removeSRIOVConfigOrFail(ctx, f, sd)
 }

-func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
+func runTMScopeResourceAlignmentTestSuite(ctx context.Context, f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
 	threadsPerCore := getSMTLevel()
-	sd := setupSRIOVConfigOrFail(f, configMap)
+	sd := setupSRIOVConfigOrFail(ctx, f, configMap)
 	var ctnAttrs, initCtnAttrs []tmCtnAttribute

-	waitForSRIOVResources(f, sd)
+	waitForSRIOVResources(ctx, f, sd)

 	envInfo := &testEnvInfo{
 		numaNodes: numaNodes,
@@ -631,7 +631,7 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.
 			deviceLimit: "1",
 		},
 	}
-	runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+	runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)

 	numCores := threadsPerCore * coreCount
 	coresReq := fmt.Sprintf("%dm", numCores*1000)
@@ -652,7 +652,7 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.
 			deviceLimit: "1",
 		},
 	}
-	runTopologyManagerNegativeTest(f, ctnAttrs, initCtnAttrs, envInfo)
+	runTopologyManagerNegativeTest(ctx, f, ctnAttrs, initCtnAttrs, envInfo)

 	// The Topology Manager with pod scope should calculate how many CPUs it needs to admit a pod basing on two requests:
 	// the maximum of init containers' demand for CPU and sum of app containers' requests for CPU.
@@ -693,15 +693,15 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.
 			deviceLimit: "1",
 		},
 	}
-	runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+	runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)

-	teardownSRIOVConfigOrFail(f, sd)
+	teardownSRIOVConfigOrFail(ctx, f, sd)
 }

-func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
+func runTopologyManagerNodeAlignmentSuiteTests(ctx context.Context, f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
 	threadsPerCore := getSMTLevel()

-	waitForSRIOVResources(f, sd)
+	waitForSRIOVResources(ctx, f, sd)

 	envInfo := &testEnvInfo{
 		numaNodes: numaNodes,
@@ -724,7 +724,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 			deviceLimit: "1",
 		},
 	}
-	runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo)
+	runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo)

 	ginkgo.By(fmt.Sprintf("Successfully admit one guaranteed pod with 2 cores, 1 %s device", sd.resourceName))
 	ctnAttrs = []tmCtnAttribute{
@@ -737,7 +737,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 			deviceLimit: "1",
 		},
 	}
-	runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo)
+	runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo)

 	if reservedSystemCPUs != "" {
 		// to avoid false negatives, we have put reserved CPUs in such a way there is at least a NUMA node
@@ -755,7 +755,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo)
 	}

 	if sd.resourceAmount > 1 {
@@ -772,7 +772,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)

 		ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with 2 cores, 1 %s device", sd.resourceName))
 		ctnAttrs = []tmCtnAttribute{
@@ -785,7 +785,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)

 		// testing more complex conditions require knowledge about the system cpu+bus topology
 	}
@@ -811,7 +811,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 1, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 1, ctnAttrs, initCtnAttrs, envInfo)

 		ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with two containers, each with 1 core, 1 %s device", sd.resourceName))
 		ctnAttrs = []tmCtnAttribute{
@@ -832,7 +832,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)

 		ginkgo.By(fmt.Sprintf("Successfully admit two guaranteed pods, each with two containers, both with with 2 cores, one with 1 %s device", sd.resourceName))
 		ctnAttrs = []tmCtnAttribute{
@@ -850,7 +850,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				cpuLimit: "2000m",
 			},
 		}
-		runTopologyManagerPositiveTest(f, 2, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerPositiveTest(ctx, f, 2, ctnAttrs, initCtnAttrs, envInfo)
 	}

 	// this is the only policy that can guarantee reliable rejects
@@ -869,7 +869,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriov
 				deviceLimit: "1",
 			},
 		}
-		runTopologyManagerNegativeTest(f, ctnAttrs, initCtnAttrs, envInfo)
+		runTopologyManagerNegativeTest(ctx, f, ctnAttrs, initCtnAttrs, envInfo)
 	}
 }

@@ -885,7 +885,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 	}

 	ginkgo.It("run Topology Manager policy test suite", func(ctx context.Context) {
-		oldCfg, err = getCurrentKubeletConfig()
+		oldCfg, err = getCurrentKubeletConfig(ctx)
 		framework.ExpectNoError(err)

 		scope := containerScopeTopology
@@ -895,9 +895,9 @@ func runTopologyManagerTests(f *framework.Framework) {
 			framework.Logf("Configuring topology Manager policy to %s", policy)

 			newCfg, _ := configureTopologyManagerInKubelet(oldCfg, policy, scope, nil, 0)
-			updateKubeletConfig(f, newCfg, true)
+			updateKubeletConfig(ctx, f, newCfg, true)
 			// Run the tests
-			runTopologyManagerPolicySuiteTests(f)
+			runTopologyManagerPolicySuiteTests(ctx, f)
 		}
 	})

@@ -906,10 +906,10 @@ func runTopologyManagerTests(f *framework.Framework) {

 		configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)

-		oldCfg, err = getCurrentKubeletConfig()
+		oldCfg, err = getCurrentKubeletConfig(ctx)
 		framework.ExpectNoError(err)

-		sd := setupSRIOVConfigOrFail(f, configMap)
+		sd := setupSRIOVConfigOrFail(ctx, f, configMap)
 		ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)

 		scope := containerScopeTopology
@@ -919,9 +919,9 @@ func runTopologyManagerTests(f *framework.Framework) {
 			framework.Logf("Configuring topology Manager policy to %s", policy)

 			newCfg, reservedSystemCPUs := configureTopologyManagerInKubelet(oldCfg, policy, scope, configMap, numaNodes)
-			updateKubeletConfig(f, newCfg, true)
+			updateKubeletConfig(ctx, f, newCfg, true)

-			runTopologyManagerNodeAlignmentSuiteTests(f, sd, reservedSystemCPUs, policy, numaNodes, coreCount)
+			runTopologyManagerNodeAlignmentSuiteTests(ctx, f, sd, reservedSystemCPUs, policy, numaNodes, coreCount)
 		}
 	})

@@ -930,22 +930,22 @@ func runTopologyManagerTests(f *framework.Framework) {

 		configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)

-		oldCfg, err = getCurrentKubeletConfig()
+		oldCfg, err = getCurrentKubeletConfig(ctx)
 		framework.ExpectNoError(err)

 		policy := topologymanager.PolicySingleNumaNode
 		scope := podScopeTopology

 		newCfg, reservedSystemCPUs := configureTopologyManagerInKubelet(oldCfg, policy, scope, configMap, numaNodes)
-		updateKubeletConfig(f, newCfg, true)
+		updateKubeletConfig(ctx, f, newCfg, true)

-		runTMScopeResourceAlignmentTestSuite(f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount)
+		runTMScopeResourceAlignmentTestSuite(ctx, f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount)
 	})

-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		if oldCfg != nil {
 			// restore kubelet config
-			updateKubeletConfig(f, oldCfg, true)
+			updateKubeletConfig(ctx, f, oldCfg, true)
 		}
 	})
 }