e2e: use Ginkgo context
All code must use the context provided by Ginkgo when making API calls or polling for a change; otherwise, the code would not return immediately when the test is aborted.
This commit is contained in:
@@ -68,7 +68,7 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() {
|
||||
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
|
||||
ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
|
||||
ginkgo.It("sets up the node and runs the test", func(ctx context.Context) {
|
||||
framework.ExpectNoError(runTest(f))
|
||||
framework.ExpectNoError(runTest(ctx, f))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -159,14 +159,14 @@ func convertSharesToWeight(shares int64) int64 {
|
||||
return 1 + ((shares-2)*9999)/262142
|
||||
}
|
||||
|
||||
func runTest(f *framework.Framework) error {
|
||||
func runTest(ctx context.Context, f *framework.Framework) error {
|
||||
var oldCfg *kubeletconfig.KubeletConfiguration
|
||||
subsystems, err := cm.GetCgroupSubsystems()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Get current kubelet configuration
|
||||
oldCfg, err = getCurrentKubeletConfig()
|
||||
oldCfg, err = getCurrentKubeletConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func runTest(f *framework.Framework) error {
|
||||
startKubelet := stopKubelet()
|
||||
|
||||
// wait until the kubelet health check will fail
|
||||
gomega.Eventually(func() bool {
|
||||
gomega.Eventually(ctx, func() bool {
|
||||
return kubeletHealthCheck(kubeletHealthCheckURL)
|
||||
}, time.Minute, time.Second).Should(gomega.BeFalse())
|
||||
|
||||
@@ -201,7 +201,7 @@ func runTest(f *framework.Framework) error {
|
||||
startKubelet()
|
||||
|
||||
// wait until the kubelet health check will succeed
|
||||
gomega.Eventually(func() bool {
|
||||
gomega.Eventually(ctx, func(ctx context.Context) bool {
|
||||
return kubeletHealthCheck(kubeletHealthCheckURL)
|
||||
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
|
||||
}
|
||||
@@ -218,7 +218,7 @@ func runTest(f *framework.Framework) error {
|
||||
startKubelet := stopKubelet()
|
||||
|
||||
// wait until the kubelet health check will fail
|
||||
gomega.Eventually(func() bool {
|
||||
gomega.Eventually(ctx, func() bool {
|
||||
return kubeletHealthCheck(kubeletHealthCheckURL)
|
||||
}, time.Minute, time.Second).Should(gomega.BeFalse())
|
||||
|
||||
@@ -228,7 +228,7 @@ func runTest(f *framework.Framework) error {
|
||||
startKubelet()
|
||||
|
||||
// wait until the kubelet health check will succeed
|
||||
gomega.Eventually(func() bool {
|
||||
gomega.Eventually(ctx, func() bool {
|
||||
return kubeletHealthCheck(kubeletHealthCheckURL)
|
||||
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
|
||||
|
||||
@@ -251,8 +251,8 @@ func runTest(f *framework.Framework) error {
|
||||
|
||||
// TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings.
|
||||
// The node may not have updated capacity and allocatable yet, so check that it happens eventually.
|
||||
gomega.Eventually(func() error {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
gomega.Eventually(ctx, func(ctx context.Context) error {
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -306,7 +306,7 @@ func runTest(f *framework.Framework) error {
|
||||
return fmt.Errorf("Unexpected memory allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableMemory, schedulerAllocatable[v1.ResourceMemory], capacity[v1.ResourceMemory])
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, 5*time.Second).Should(gomega.BeNil())
|
||||
}, time.Minute, 5*time.Second).Should(gomega.Succeed())
|
||||
|
||||
cgroupPath := ""
|
||||
if currentConfig.CgroupDriver == "systemd" {
|
||||
|
Reference in New Issue
Block a user