Fix flaky test in maxSurge integration tests
Signed-off-by: kerthcet <kerthcet@gmail.com>
@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	eventsv1 "k8s.io/api/events/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,6 +42,9 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/ktesting"
+	testingclock "k8s.io/utils/clock/testing"
+	"k8s.io/utils/ptr"
+
 	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
@@ -54,8 +58,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/profile"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
-	testingclock "k8s.io/utils/clock/testing"
-	"k8s.io/utils/ptr"
+	utiltesting "k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func TestSchedulerCreation(t *testing.T) {
@@ -994,18 +997,18 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			fakeClient := fake.NewSimpleClientset(objs...)
 			informerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
 			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()})
+			defer eventBroadcaster.Shutdown()
+
 			eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, fakePermit)
 
 			outOfTreeRegistry := frameworkruntime.Registry{
 				fakePermit: newFakePermitPlugin(eventRecorder),
 			}
 
-			_, ctx := ktesting.NewTestContext(t)
-			ctx, cancel := context.WithCancel(ctx)
-			defer cancel()
+			tCtx := utiltesting.Init(t)
 
 			scheduler, err := New(
-				ctx,
+				tCtx,
 				fakeClient,
 				informerFactory,
 				nil,
@@ -1034,13 +1037,13 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			defer stopFn()
 
 			// Run scheduler.
-			informerFactory.Start(ctx.Done())
-			informerFactory.WaitForCacheSync(ctx.Done())
-			go scheduler.Run(ctx)
+			informerFactory.Start(tCtx.Done())
+			informerFactory.WaitForCacheSync(tCtx.Done())
+			go scheduler.Run(tCtx)
 
 			// Send pods to be scheduled.
 			for _, p := range tc.waitSchedulingPods {
-				_, err = fakeClient.CoreV1().Pods("").Create(ctx, p, metav1.CreateOptions{})
+				_, err = fakeClient.CoreV1().Pods("").Create(tCtx, p, metav1.CreateOptions{})
 				if err != nil {
 					t.Fatal(err)
 				}
@@ -1049,18 +1052,16 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			// Wait all pods in waitSchedulingPods to be scheduled.
 			wg.Wait()
 
+			utiltesting.Eventually(tCtx, func(utiltesting.TContext) sets.Set[string] {
 				// Ensure that all waitingPods in scheduler can be obtained from any profiles.
+				actualPodNamesInWaitingPods := sets.New[string]()
 				for _, fwk := range scheduler.Profiles {
-					actualPodNamesInWaitingPods := sets.NewString()
 					fwk.IterateOverWaitingPods(func(pod framework.WaitingPod) {
 						actualPodNamesInWaitingPods.Insert(pod.GetPod().Name)
 					})
-					// Validate the name of pods in waitingPods matches expectations.
-					if actualPodNamesInWaitingPods.Len() != len(tc.expectPodNamesInWaitingPods) ||
-						!actualPodNamesInWaitingPods.HasAll(tc.expectPodNamesInWaitingPods...) {
-						t.Fatalf("Unexpected waitingPods in scheduler profile %s, expect: %#v, got: %#v", fwk.ProfileName(), tc.expectPodNamesInWaitingPods, actualPodNamesInWaitingPods.List())
-					}
 				}
+				return actualPodNamesInWaitingPods
+			}).WithTimeout(permitTimeout).Should(gomega.Equal(sets.New(tc.expectPodNamesInWaitingPods...)), "unexpected waitingPods in scheduler profile")
 		})
 	}
 }
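The core of the fix is replacing a one-shot assertion on the waiting-pod set, which can race with the scheduler's permit handling, with a polling assertion that retries until the expected set is observed or a timeout expires. Below is a minimal, self-contained sketch of that pattern using stock gomega and apimachinery sets rather than the repo's utiltesting wrapper shown in the diff; the snapshotWaitingPods helper and the pod names are hypothetical stand-ins for walking scheduler.Profiles with IterateOverWaitingPods.

package example

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/util/sets"
)

// TestWaitingPodsEventually sketches the polling pattern: take a fresh
// snapshot of the waiting pod names on every attempt and compare it against
// the expectation, instead of failing on the first mismatch.
func TestWaitingPodsEventually(t *testing.T) {
	g := gomega.NewWithT(t)

	expected := sets.New("pod-a", "pod-b")

	// Hypothetical stand-in for iterating scheduler.Profiles and recording
	// pod.GetPod().Name from IterateOverWaitingPods.
	snapshotWaitingPods := func() sets.Set[string] {
		return sets.New("pod-a", "pod-b")
	}

	// Retry the snapshot until it equals the expected set or the timeout
	// expires; this is what removes the race with the scheduling goroutine.
	g.Eventually(snapshotWaitingPods).
		WithTimeout(15 * time.Second).
		WithPolling(100 * time.Millisecond).
		Should(gomega.Equal(expected), "unexpected waitingPods")
}

The diff pairs this with test-managed lifecycle cleanup (utiltesting.Init in place of a manual context.WithCancel, plus defer eventBroadcaster.Shutdown()), so goroutines started by the test are torn down deterministically.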