e2e: use Ginkgo context
All code must use the context provided by Ginkgo when making API calls or polling for a change; otherwise the code will not return promptly when the test gets aborted.
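The change follows one pattern throughout, shown in the sketch below (a minimal illustration, not code taken from this commit; the package name, the client variable, the pod name, and the timeouts are assumptions): Ginkgo v2 passes a context.Context into spec and setup bodies, and that context is forwarded both to client-go calls and to gomega.Eventually, so API requests and polling loops stop as soon as the spec is interrupted or times out.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// client is assumed to be initialized elsewhere, e.g. in a suite-level setup.
var client kubernetes.Interface

var _ = ginkgo.Describe("context plumbing", func() {
	ginkgo.It("polls with the spec's context", func(ctx context.Context) {
		// ctx is cancelled when the spec is aborted or times out, so both the
		// API call and the surrounding polling loop return promptly.
		gomega.Eventually(ctx, func(ctx context.Context) error {
			_, err := client.CoreV1().Pods("default").Get(ctx, "some-pod", metav1.GetOptions{})
			return err
		}, 2*time.Minute, 5*time.Second).Should(gomega.Succeed())
	})
})

Helpers that previously created their own context.TODO() now take the ctx as their first parameter instead, which is what most of the hunks below do.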
@@ -77,9 +77,9 @@ var _ = SIGDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Evic
 pressureTimeout := 15 * time.Minute
 inodesConsumed := uint64(200000)
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 // Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 inodesFree := *summary.Node.Fs.InodesFree
 if inodesFree <= inodesConsumed {
 e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
@@ -114,9 +114,9 @@ var _ = SIGDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:
 expectedStarvedResource := resourceInodes
 inodesConsumed := uint64(100000)
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 // Set the eviction threshold to inodesFree - inodesConsumed, so that using inodesConsumed causes an eviction.
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 inodesFree := *summary.Node.Fs.InodesFree
 if inodesFree <= inodesConsumed {
 e2eskipper.Skipf("Too few inodes free on the host for the InodeEviction test to run")
@@ -144,9 +144,9 @@ var _ = SIGDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][Node
 expectedStarvedResource := v1.ResourceMemory
 pressureTimeout := 10 * time.Minute
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 // Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
-kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
+kubeReserved := getNodeCPUAndMemoryCapacity(ctx, f)[v1.ResourceMemory]
 // The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
 // We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
 kubeReserved.Sub(resource.MustParse("300Mi"))
@@ -179,8 +179,8 @@ var _ = SIGDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeatu
 expectedStarvedResource := v1.ResourceEphemeralStorage
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
 
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
-summary := eventuallyGetSummary()
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
+summary := eventuallyGetSummary(ctx)
 
 diskConsumedByTest := resource.MustParse("4Gi")
 availableBytesOnSystem := *(summary.Node.Fs.AvailableBytes)
@@ -217,9 +217,9 @@ var _ = SIGDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeF
 expectedNodeCondition := v1.NodeDiskPressure
 expectedStarvedResource := v1.ResourceEphemeralStorage
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 diskConsumed := resource.MustParse("4Gi")
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 availableBytes := *(summary.Node.Fs.AvailableBytes)
 if availableBytes <= uint64(diskConsumed.Value()) {
 e2eskipper.Skipf("Too little disk free on the host for the LocalStorageSoftEviction test to run")
@@ -254,7 +254,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationMemoryBackedVolumeEviction [Sl
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 evictionTestTimeout := 7 * time.Minute
 ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
 initialConfig.FeatureGates["SizeMemoryBackedVolumes"] = false
@@ -294,7 +294,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disr
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 evictionTestTimeout := 10 * time.Minute
 ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): "0%"}
 })
@@ -353,9 +353,9 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]
 highPriority := int32(999999999)
 
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 memoryConsumed := resource.MustParse("600Mi")
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 availableBytes := *(summary.Node.Memory.AvailableBytes)
 if availableBytes <= uint64(memoryConsumed.Value()) {
 e2eskipper.Skipf("Too little memory free on the host for the PriorityMemoryEvictionOrdering test to run")
@@ -363,12 +363,12 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalMemoryAvailable): fmt.Sprintf("%d", availableBytes-uint64(memoryConsumed.Value()))}
 initialConfig.EvictionMinimumReclaim = map[string]string{}
 })
-ginkgo.BeforeEach(func() {
-_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
+ginkgo.BeforeEach(func(ctx context.Context) {
+_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
-ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
+ginkgo.AfterEach(func(ctx context.Context) {
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -411,9 +411,9 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
 highPriority := int32(999999999)
 
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 diskConsumed := resource.MustParse("4Gi")
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 availableBytes := *(summary.Node.Fs.AvailableBytes)
 if availableBytes <= uint64(diskConsumed.Value()) {
 e2eskipper.Skipf("Too little disk free on the host for the PriorityLocalStorageEvictionOrdering test to run")
@@ -421,12 +421,12 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
 initialConfig.EvictionMinimumReclaim = map[string]string{}
 })
-ginkgo.BeforeEach(func() {
-_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
+ginkgo.BeforeEach(func(ctx context.Context) {
+_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
-ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
+ginkgo.AfterEach(func(ctx context.Context) {
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -468,19 +468,19 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No
 highPriority := int32(999999999)
 
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 pidsConsumed := int64(10000)
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
 initialConfig.EvictionMinimumReclaim = map[string]string{}
 })
-ginkgo.BeforeEach(func() {
-_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
+ginkgo.BeforeEach(func(ctx context.Context) {
+_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
-ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
+ginkgo.AfterEach(func(ctx context.Context) {
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -503,9 +503,9 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No
 })
 
 ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled [NodeFeature:PodDisruptionConditions]", func() {
-tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 pidsConsumed := int64(10000)
-summary := eventuallyGetSummary()
+summary := eventuallyGetSummary(ctx)
 availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
 initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
 initialConfig.EvictionMinimumReclaim = map[string]string{}
@@ -543,10 +543,10 @@ type podEvictSpec struct {
 // It ensures that all pods with non-zero evictionPriority are eventually evicted.
 //
 // runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
-func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) {
+func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(ctx context.Context), testSpecs []podEvictSpec) {
 // Place the remainder of the test within a context so that the kubelet config is set before and after the test.
 ginkgo.Context("", func() {
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 // reduce memory usage in the allocatable cgroup to ensure we do not have MemoryPressure
 reduceAllocatableMemoryUsageIfCgroupv1()
 // Nodes do not immediately report local storage capacity
@@ -557,35 +557,35 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 for _, spec := range testSpecs {
 pods = append(pods, spec.pod)
 }
-e2epod.NewPodClient(f).CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(ctx, pods)
 })
 
 ginkgo.It("should eventually evict all of the correct pods", func(ctx context.Context) {
 ginkgo.By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition))
-gomega.Eventually(func() error {
-logFunc()
-if expectedNodeCondition == noPressure || hasNodeCondition(f, expectedNodeCondition) {
+gomega.Eventually(ctx, func(ctx context.Context) error {
+logFunc(ctx)
+if expectedNodeCondition == noPressure || hasNodeCondition(ctx, f, expectedNodeCondition) {
 return nil
 }
 return fmt.Errorf("NodeCondition: %s not encountered", expectedNodeCondition)
 }, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 ginkgo.By("Waiting for evictions to occur")
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func(ctx context.Context) error {
 if expectedNodeCondition != noPressure {
-if hasNodeCondition(f, expectedNodeCondition) {
+if hasNodeCondition(ctx, f, expectedNodeCondition) {
 framework.Logf("Node has %s", expectedNodeCondition)
 } else {
 framework.Logf("Node does NOT have %s", expectedNodeCondition)
 }
 }
-logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
-logFunc()
-return verifyEvictionOrdering(f, testSpecs)
-}, pressureTimeout, evictionPollInterval).Should(gomega.BeNil())
+logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey)
+logFunc(ctx)
+return verifyEvictionOrdering(ctx, f, testSpecs)
+}, pressureTimeout, evictionPollInterval).Should(gomega.Succeed())
 
 ginkgo.By("checking for the expected pod conditions for evicted pods")
-verifyPodConditions(f, testSpecs)
+verifyPodConditions(ctx, f, testSpecs)
 
 // We observe pressure from the API server. The eviction manager observes pressure from the kubelet internal stats.
 // This means the eviction manager will observe pressure before we will, creating a delay between when the eviction manager
@@ -594,30 +594,30 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 time.Sleep(pressureDelay)
 
 ginkgo.By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
-gomega.Eventually(func() error {
-logFunc()
-logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
-if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+gomega.Eventually(ctx, func(ctx context.Context) error {
+logFunc(ctx)
+logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey)
+if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
 return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
 }
 return nil
 }, pressureDisappearTimeout, evictionPollInterval).Should(gomega.BeNil())
 
 ginkgo.By("checking for stable, pressure-free condition without unexpected pod failures")
-gomega.Consistently(func() error {
-if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+gomega.Consistently(ctx, func(ctx context.Context) error {
+if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
 return fmt.Errorf("%s disappeared and then reappeared", expectedNodeCondition)
 }
-logFunc()
-logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
-return verifyEvictionOrdering(f, testSpecs)
-}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.BeNil())
+logFunc(ctx)
+logKubeletLatencyMetrics(ctx, kubeletmetrics.EvictionStatsAgeKey)
+return verifyEvictionOrdering(ctx, f, testSpecs)
+}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(gomega.Succeed())
 
 ginkgo.By("checking for correctly formatted eviction events")
-verifyEvictionEvents(f, testSpecs, expectedStarvedResource)
+verifyEvictionEvents(ctx, f, testSpecs, expectedStarvedResource)
 })
 
-ginkgo.AfterEach(func() {
+ginkgo.AfterEach(func(ctx context.Context) {
 prePullImagesIfNeccecary := func() {
 if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
 // The disk eviction test may cause the prepulled images to be evicted,
@@ -631,14 +631,14 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 ginkgo.By("deleting pods")
 for _, spec := range testSpecs {
 ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-e2epod.NewPodClient(f).DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(ctx, spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
 }
 
 // In case a test fails before verifying that NodeCondition no longer exist on the node,
 // we should wait for the NodeCondition to disappear
 ginkgo.By(fmt.Sprintf("making sure NodeCondition %s no longer exists on the node", expectedNodeCondition))
-gomega.Eventually(func() error {
-if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+gomega.Eventually(ctx, func(ctx context.Context) error {
+if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
 return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
 }
 return nil
@@ -650,8 +650,8 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 
 // Ensure that the NodeCondition hasn't returned after pulling images
 ginkgo.By(fmt.Sprintf("making sure NodeCondition %s doesn't exist again after pulling images", expectedNodeCondition))
-gomega.Eventually(func() error {
-if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+gomega.Eventually(ctx, func(ctx context.Context) error {
+if expectedNodeCondition != noPressure && hasNodeCondition(ctx, f, expectedNodeCondition) {
 return fmt.Errorf("Conditions haven't returned to normal, node still has %s", expectedNodeCondition)
 }
 return nil
@@ -659,7 +659,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 
 ginkgo.By("making sure we can start a new pod after the test")
 podName := "test-admit-pod"
-e2epod.NewPodClient(f).CreateSync(&v1.Pod{
+e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: podName,
 },
@@ -676,8 +676,8 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 
 if ginkgo.CurrentSpecReport().Failed() {
 if framework.TestContext.DumpLogsOnFailure {
-logPodEvents(f)
-logNodeEvents(f)
+logPodEvents(ctx, f)
+logNodeEvents(ctx, f)
 }
 }
 })
@@ -686,9 +686,9 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 
 // verifyEvictionOrdering returns an error if all non-zero priority pods have not been evicted, nil otherwise
 // This function panics (via Expect) if eviction ordering is violated, or if a priority-zero pod fails.
-func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) error {
+func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) error {
 // Gather current information
-updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
+updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
 if err != nil {
 return err
 }
@@ -752,10 +752,10 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
 return fmt.Errorf("pods that should be evicted are still running: %#v", pendingPods)
 }
 
-func verifyPodConditions(f *framework.Framework, testSpecs []podEvictSpec) {
+func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) {
 for _, spec := range testSpecs {
 if spec.wantPodDisruptionCondition != nil {
-pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), spec.pod.Name, metav1.GetOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, spec.pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
 
 cType := *spec.wantPodDisruptionCondition
@@ -767,7 +767,7 @@ func verifyPodConditions(f *framework.Framework, testSpecs []podEvictSpec) {
 }
 }
 
-func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) {
+func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) {
 for _, spec := range testSpecs {
 pod := spec.pod
 if spec.evictionPriority != 0 {
@@ -777,7 +777,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 "involvedObject.namespace": f.Namespace.Name,
 "reason": eviction.Reason,
 }.AsSelector().String()
-podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{FieldSelector: selector})
+podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{FieldSelector: selector})
 gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err)
 framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
 event := podEvictEvents.Items[0]
@@ -822,15 +822,15 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 }
 
 // Returns TRUE if the node has the node condition, FALSE otherwise
-func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
-localNodeStatus := getLocalNode(f).Status
+func hasNodeCondition(ctx context.Context, f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {
+localNodeStatus := getLocalNode(ctx, f).Status
 _, actualNodeCondition := testutils.GetNodeCondition(&localNodeStatus, expectedNodeCondition)
 gomega.Expect(actualNodeCondition).NotTo(gomega.BeNil())
 return actualNodeCondition.Status == v1.ConditionTrue
 }
 
-func logInodeMetrics() {
-summary, err := getNodeSummary()
+func logInodeMetrics(ctx context.Context) {
+summary, err := getNodeSummary(ctx)
 if err != nil {
 framework.Logf("Error getting summary: %v", err)
 return
@@ -856,8 +856,8 @@ func logInodeMetrics() {
 }
 }
 
-func logDiskMetrics() {
-summary, err := getNodeSummary()
+func logDiskMetrics(ctx context.Context) {
+summary, err := getNodeSummary(ctx)
 if err != nil {
 framework.Logf("Error getting summary: %v", err)
 return
@@ -883,8 +883,8 @@ func logDiskMetrics() {
 }
 }
 
-func logMemoryMetrics() {
-summary, err := getNodeSummary()
+func logMemoryMetrics(ctx context.Context) {
+summary, err := getNodeSummary(ctx)
 if err != nil {
 framework.Logf("Error getting summary: %v", err)
 return
@@ -907,8 +907,8 @@ func logMemoryMetrics() {
 }
 }
 
-func logPidMetrics() {
-summary, err := getNodeSummary()
+func logPidMetrics(ctx context.Context) {
+summary, err := getNodeSummary(ctx)
 if err != nil {
 framework.Logf("Error getting summary: %v", err)
 return
@@ -918,9 +918,9 @@ func logPidMetrics() {
 }
 }
 
-func eventuallyGetSummary() (s *kubeletstatsv1alpha1.Summary) {
-gomega.Eventually(func() error {
-summary, err := getNodeSummary()
+func eventuallyGetSummary(ctx context.Context) (s *kubeletstatsv1alpha1.Summary) {
+gomega.Eventually(ctx, func() error {
+summary, err := getNodeSummary(ctx)
 if err != nil {
 return err
 }