e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change; otherwise the code will not return promptly when the test gets aborted.
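As a rough illustration of the pattern this diff applies, here is a minimal, hypothetical sketch (the package, helper name, clientset, namespace, and pod name are placeholders, not taken from this change): the ctx that Ginkgo passes to the spec body is threaded into the client-go call and into gomega.Eventually, so polling stops as soon as the spec is interrupted.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// describePodRunning registers a hypothetical spec that hands the
// Ginkgo-provided ctx to both the API call and the Eventually poll,
// so both return as soon as the spec is aborted.
func describePodRunning(cs kubernetes.Interface, namespace, podName string) {
	ginkgo.It("reports the pod as running", func(ctx context.Context) {
		gomega.Eventually(ctx, func(ctx context.Context) error {
			// Use the Ginkgo context instead of context.TODO().
			pod, err := cs.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				return err
			}
			if pod.Status.Phase != v1.PodRunning {
				return fmt.Errorf("pod %s/%s is in phase %q", namespace, podName, pod.Status.Phase)
			}
			return nil
		}, time.Minute, 10*time.Second).Should(gomega.Succeed())
	})
}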
@@ -136,8 +136,8 @@ func logPodResources(podIdx int, pr *kubeletpodresourcesv1.PodResources) {
type podResMap map[string]map[string]kubeletpodresourcesv1.ContainerResources

-func getPodResources(cli kubeletpodresourcesv1.PodResourcesListerClient) podResMap {
-resp, err := cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+func getPodResources(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient) podResMap {
+resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)

res := make(map[string]map[string]kubeletpodresourcesv1.ContainerResources)
@@ -164,10 +164,10 @@ func newTestPodData() *testPodData {
}
}

-func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podDesc) {
+func (tpd *testPodData) createPodsForTest(ctx context.Context, f *framework.Framework, podReqs []podDesc) {
for _, podReq := range podReqs {
pod := makePodResourcesTestPod(podReq)
-pod = e2epod.NewPodClient(f).CreateSync(pod)
+pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)

framework.Logf("created pod %s", podReq.podName)
tpd.PodMap[podReq.podName] = pod
@@ -175,17 +175,17 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD
}

/* deletePodsForTest clean up all the pods run for a testcase. Must ensure proper cleanup */
-func (tpd *testPodData) deletePodsForTest(f *framework.Framework) {
-deletePodsAsync(f, tpd.PodMap)
+func (tpd *testPodData) deletePodsForTest(ctx context.Context, f *framework.Framework) {
+deletePodsAsync(ctx, f, tpd.PodMap)
}

/* deletePod removes pod during a test. Should do a best-effort clean up */
-func (tpd *testPodData) deletePod(f *framework.Framework, podName string) {
+func (tpd *testPodData) deletePod(ctx context.Context, f *framework.Framework, podName string) {
_, ok := tpd.PodMap[podName]
if !ok {
return
}
-deletePodSyncByName(f, podName)
+deletePodSyncByName(ctx, f, podName)
delete(tpd.PodMap, podName)
}
@@ -250,11 +250,11 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
return nil
}

-func expectPodResources(offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc) {
-gomega.EventuallyWithOffset(1+offset, func() error {
-found := getPodResources(cli)
+func expectPodResources(ctx context.Context, offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc) {
+gomega.EventuallyWithOffset(1+offset, ctx, func(ctx context.Context) error {
+found := getPodResources(ctx, cli)
return matchPodDescWithResources(expected, found)
-}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+}, time.Minute, 10*time.Second).Should(gomega.Succeed())
}

func filterOutDesc(descs []podDesc, name string) []podDesc {
@@ -268,7 +268,7 @@ func filterOutDesc(descs []podDesc, name string) []podDesc {
return ret
}

-func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData) {
+func podresourcesListTests(ctx context.Context, f *framework.Framework, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData) {
var tpd *testPodData

var found podResMap
@@ -281,7 +281,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
}

ginkgo.By("checking the output when no pods are present")
-found = getPodResources(cli)
+found = getPodResources(ctx, cli)
gomega.ExpectWithOffset(1, found).To(gomega.HaveLen(expectedBasePods), "base pod expectation mismatch")

tpd = newTestPodData()
@@ -296,9 +296,9 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
cntName: "cnt-00",
},
}
-tpd.createPodsForTest(f, expected)
-expectPodResources(1, cli, expected)
-tpd.deletePodsForTest(f)
+tpd.createPodsForTest(ctx, f, expected)
+expectPodResources(ctx, 1, cli, expected)
+tpd.deletePodsForTest(ctx, f)

tpd = newTestPodData()
ginkgo.By("checking the output when only a subset of pods require resources")
@@ -352,9 +352,9 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
}

}
-tpd.createPodsForTest(f, expected)
-expectPodResources(1, cli, expected)
-tpd.deletePodsForTest(f)
+tpd.createPodsForTest(ctx, f, expected)
+expectPodResources(ctx, 1, cli, expected)
+tpd.deletePodsForTest(ctx, f)

tpd = newTestPodData()
ginkgo.By("checking the output when creating pods which require resources between calls")
@@ -396,8 +396,8 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
}
}

-tpd.createPodsForTest(f, expected)
-expectPodResources(1, cli, expected)
+tpd.createPodsForTest(ctx, f, expected)
+expectPodResources(ctx, 1, cli, expected)

if sd != nil {
extra = podDesc{
@@ -416,13 +416,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
}

-tpd.createPodsForTest(f, []podDesc{
+tpd.createPodsForTest(ctx, f, []podDesc{
extra,
})

expected = append(expected, extra)
-expectPodResources(1, cli, expected)
-tpd.deletePodsForTest(f)
+expectPodResources(ctx, 1, cli, expected)
+tpd.deletePodsForTest(ctx, f)

tpd = newTestPodData()
ginkgo.By("checking the output when deleting pods which require resources between calls")
@@ -476,13 +476,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
},
}
}
-tpd.createPodsForTest(f, expected)
-expectPodResources(1, cli, expected)
+tpd.createPodsForTest(ctx, f, expected)
+expectPodResources(ctx, 1, cli, expected)

-tpd.deletePod(f, "pod-01")
+tpd.deletePod(ctx, f, "pod-01")
expectedPostDelete := filterOutDesc(expected, "pod-01")
-expectPodResources(1, cli, expectedPostDelete)
-tpd.deletePodsForTest(f)
+expectPodResources(ctx, 1, cli, expectedPostDelete)
+tpd.deletePodsForTest(ctx, f)

tpd = newTestPodData()
ginkgo.By("checking the output when pods request non integral CPUs")
@@ -511,15 +511,15 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
}

}
-tpd.createPodsForTest(f, expected)
-expectPodResources(1, cli, expected)
-tpd.deletePodsForTest(f)
+tpd.createPodsForTest(ctx, f, expected)
+expectPodResources(ctx, 1, cli, expected)
+tpd.deletePodsForTest(ctx, f)

}

-func podresourcesGetAllocatableResourcesTests(cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
+func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpodresourcesv1.PodResourcesListerClient, sd *sriovData, onlineCPUs, reservedSystemCPUs cpuset.CPUSet) {
ginkgo.By("checking the devices known to the kubelet")
-resp, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoErrorWithOffset(1, err)
devs := resp.GetDevices()
allocatableCPUs := cpuset.NewCPUSetInt64(resp.GetCpuIds()...)
@@ -565,9 +565,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
})

ginkgo.Context("with CPU manager Static policy", func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
// this is a very rough check. We just want to rule out system that does NOT have enough resources
-_, cpuAlloc, _ := getLocalNodeCPUDetails(f)
+_, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f)

if cpuAlloc < minCoreCount {
e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount)
@@ -576,7 +576,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P

// empty context to apply kubelet config changes
ginkgo.Context("", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the CPU Manager policy to static.
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic)
@@ -593,10 +593,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
framework.ExpectNoError(err)

configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
-sd := setupSRIOVConfigOrFail(f, configMap)
+sd := setupSRIOVConfigOrFail(ctx, f, configMap)
ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)

-waitForSRIOVResources(f, sd)
+waitForSRIOVResources(ctx, f, sd)

endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -605,12 +605,12 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
framework.ExpectNoError(err)
defer conn.Close()

-waitForSRIOVResources(f, sd)
+waitForSRIOVResources(ctx, f, sd)

ginkgo.By("checking List()")
-podresourcesListTests(f, cli, sd)
+podresourcesListTests(ctx, f, cli, sd)
ginkgo.By("checking GetAllocatableResources()")
-podresourcesGetAllocatableResourcesTests(cli, sd, onlineCPUs, reservedSystemCPUs)
+podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs)
})
})
})
@@ -622,10 +622,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
requireSRIOVDevices()

configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
-sd := setupSRIOVConfigOrFail(f, configMap)
+sd := setupSRIOVConfigOrFail(ctx, f, configMap)
ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)

-waitForSRIOVResources(f, sd)
+waitForSRIOVResources(ctx, f, sd)

endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -634,11 +634,11 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
framework.ExpectNoError(err)
defer conn.Close()

-waitForSRIOVResources(f, sd)
+waitForSRIOVResources(ctx, f, sd)

// intentionally passing empty cpuset instead of onlineCPUs because with none policy
// we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static
-podresourcesGetAllocatableResourcesTests(cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{})
+podresourcesGetAllocatableResourcesTests(ctx, cli, sd, cpuset.CPUSet{}, cpuset.CPUSet{})
})
})
})
@@ -649,9 +649,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
})

ginkgo.Context("with CPU manager Static policy", func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
// this is a very rough check. We just want to rule out system that does NOT have enough resources
-_, cpuAlloc, _ := getLocalNodeCPUDetails(f)
+_, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f)

if cpuAlloc < minCoreCount {
e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount)
@@ -660,7 +660,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P

// empty context to apply kubelet config changes
ginkgo.Context("", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the CPU Manager policy to static.
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic)
@@ -683,8 +683,8 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
framework.ExpectNoError(err)
defer conn.Close()

-podresourcesListTests(f, cli, nil)
-podresourcesGetAllocatableResourcesTests(cli, nil, onlineCPUs, reservedSystemCPUs)
+podresourcesListTests(ctx, f, cli, nil)
+podresourcesGetAllocatableResourcesTests(ctx, cli, nil, onlineCPUs, reservedSystemCPUs)
})
})
})
@@ -700,12 +700,12 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P

// intentionally passing empty cpuset instead of onlineCPUs because with none policy
// we should get no allocatable cpus - no exclusively allocatable CPUs, depends on policy static
-podresourcesGetAllocatableResourcesTests(cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{})
+podresourcesGetAllocatableResourcesTests(ctx, cli, nil, cpuset.CPUSet{}, cpuset.CPUSet{})
})
})

ginkgo.Context("with disabled KubeletPodResourcesGetAllocatable feature gate", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
if initialConfig.FeatureGates == nil {
initialConfig.FeatureGates = make(map[string]bool)
}
@@ -721,7 +721,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
defer conn.Close()

ginkgo.By("checking GetAllocatableResources fail if the feature gate is not enabled")
-allocatableRes, err := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+allocatableRes, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.Logf("GetAllocatableResources result: %v, err: %v", allocatableRes, err)
framework.ExpectError(err, "With feature gate disabled, the call must fail")
})
@@ -737,9 +737,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
})

ginkgo.Context("with CPU manager Static policy", func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
// this is a very rough check. We just want to rule out system that does NOT have enough resources
-_, cpuAlloc, _ := getLocalNodeCPUDetails(f)
+_, cpuAlloc, _ := getLocalNodeCPUDetails(ctx, f)

if cpuAlloc < minCoreCount {
e2eskipper.Skipf("Skipping CPU Manager tests since the CPU allocatable < %d", minCoreCount)
@@ -748,7 +748,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P

// empty context to apply kubelet config changes
ginkgo.Context("", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
// Set the CPU Manager policy to static.
initialConfig.CPUManagerPolicy = string(cpumanager.PolicyStatic)
@@ -761,10 +761,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
})

ginkgo.It("should return proper podresources the same as before the restart of kubelet", func(ctx context.Context) {
-dpPod := setupKubeVirtDevicePluginOrFail(f)
+dpPod := setupKubeVirtDevicePluginOrFail(ctx, f)
ginkgo.DeferCleanup(teardownKubeVirtDevicePluginOrFail, f, dpPod)

-waitForKubeVirtResources(f, dpPod)
+waitForKubeVirtResources(ctx, f, dpPod)

endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -775,7 +775,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P

ginkgo.By("checking List and resources kubevirt resource should be without topology")

-allocatableResponse, _ := cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+allocatableResponse, _ := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
for _, dev := range allocatableResponse.GetDevices() {
if dev.ResourceName != KubeVirtResourceName {
continue
@@ -794,11 +794,11 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
}

tpd := newTestPodData()
-tpd.createPodsForTest(f, []podDesc{
+tpd.createPodsForTest(ctx, f, []podDesc{
desc,
})

-expectPodResources(1, cli, []podDesc{desc})
+expectPodResources(ctx, 1, cli, []podDesc{desc})

restartTime := time.Now()
ginkgo.By("Restarting Kubelet")
@@ -807,8 +807,8 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
// we need to wait for the node to be reported ready before we can safely query
// the podresources endpoint again. Otherwise we will have false negatives.
ginkgo.By("Wait for node to be ready")
-gomega.Eventually(func() bool {
-node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+gomega.Eventually(ctx, func() bool {
+node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
for _, cond := range node.Status.Conditions {
if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
@@ -818,15 +818,15 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
return false
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())

-expectPodResources(1, cli, []podDesc{desc})
-tpd.deletePodsForTest(f)
+expectPodResources(ctx, 1, cli, []podDesc{desc})
+tpd.deletePodsForTest(ctx, f)
})
})
})
})

ginkgo.Context("when querying /metrics", func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
// ensure APIs have been called at least once
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -835,10 +835,10 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
framework.ExpectNoError(err)
defer conn.Close()

-_, err = cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
+_, err = cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
framework.ExpectNoError(err)

-_, err = cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
+_, err = cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
framework.ExpectNoError(err)
})
@@ -861,9 +861,9 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
})

ginkgo.By("Giving the Kubelet time to start up and produce metrics")
-gomega.Eventually(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
+gomega.Eventually(ctx, getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
ginkgo.By("Ensuring the metrics match the expectations a few more times")
-gomega.Consistently(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
+gomega.Consistently(ctx, getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
})

})
@@ -883,18 +883,18 @@ func getOnlineCPUs() (cpuset.CPUSet, error) {
return cpuset.Parse(strings.TrimSpace(string(onlineCPUList)))
}

-func setupKubeVirtDevicePluginOrFail(f *framework.Framework) *v1.Pod {
-e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
+func setupKubeVirtDevicePluginOrFail(ctx context.Context, f *framework.Framework) *v1.Pod {
+e2enode.WaitForNodeToBeReady(ctx, f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)

dp := getKubeVirtDevicePluginPod()
dp.Spec.NodeName = framework.TestContext.NodeName

ginkgo.By("Create KubeVirt device plugin pod")

-dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{})
+dpPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, dp, metav1.CreateOptions{})
framework.ExpectNoError(err)

-if err = e2epod.WaitForPodCondition(f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil {
+if err = e2epod.WaitForPodCondition(ctx, f.ClientSet, metav1.NamespaceSystem, dp.Name, "Ready", 120*time.Second, testutils.PodRunningReady); err != nil {
framework.Logf("KubeVirt Pod %v took too long to enter running/ready: %v", dp.Name, err)
}
framework.ExpectNoError(err)
@@ -902,16 +902,16 @@ func setupKubeVirtDevicePluginOrFail(f *framework.Framework) *v1.Pod {
return dpPod
}

-func teardownKubeVirtDevicePluginOrFail(f *framework.Framework, pod *v1.Pod) {
+func teardownKubeVirtDevicePluginOrFail(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
gp := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
ginkgo.By(fmt.Sprintf("Delete KubeVirt device plugin pod %s/%s", pod.Namespace, pod.Name))
-err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions)
+err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, deleteOptions)

framework.ExpectNoError(err)
-waitForAllContainerRemoval(pod.Name, pod.Namespace)
+waitForAllContainerRemoval(ctx, pod.Name, pod.Namespace)
}

func findKubeVirtResource(node *v1.Node) int64 {
@@ -927,11 +927,11 @@ func findKubeVirtResource(node *v1.Node) int64 {
return 0
}

-func waitForKubeVirtResources(f *framework.Framework, pod *v1.Pod) {
+func waitForKubeVirtResources(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
ginkgo.By("Waiting for kubevirt resources to become available on the local node")

-gomega.Eventually(func() bool {
-node := getLocalNode(f)
+gomega.Eventually(ctx, func(ctx context.Context) bool {
+node := getLocalNode(ctx, f)
kubeVirtResourceAmount := findKubeVirtResource(node)
return kubeVirtResourceAmount != 0
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
@@ -957,10 +957,10 @@ func getKubeVirtDevicePluginPod() *v1.Pod {
return p
}

-func getPodResourcesMetrics() (e2emetrics.KubeletMetrics, error) {
+func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {
// we are running out of good names, so we need to be unnecessarily specific to avoid clashes
ginkgo.By("getting Pod Resources metrics from the metrics API")
-return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics")
}

func timelessSampleAtLeast(lower interface{}) types.GomegaMatcher {