Fix flaky test on multi-profile waiting pods
Signed-off-by: kerthcet <kerthcet@gmail.com>
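
The test asserted the contents of the waitingPods pool immediately after the permit plugin emitted its event, but the pod is only added to the pool around the time the event fires, so a single-shot check could race and fail. Bound the test context with a timeout equal to the permit plugin's wait duration and poll the assertion until it holds.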
@@ -31,6 +31,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
@@ -977,6 +978,7 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			fakeClient := fake.NewSimpleClientset(objs...)
 			informerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
 			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()})
+			defer eventBroadcaster.Shutdown()
 			eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, fakePermit)
 
 			outOfTreeRegistry := frameworkruntime.Registry{
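
Shutting down the event broadcaster when the test returns stops its background goroutines; with the longer, polled wait introduced below, a broadcaster left running would otherwise outlive the test.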
@@ -984,7 +986,8 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			}
 
 			_, ctx := ktesting.NewTestContext(t)
-			ctx, cancel := context.WithCancel(ctx)
+			// The timeout equals the permit plugin's wait duration.
+			ctx, cancel := context.WithTimeout(ctx, 100*time.Second)
 			defer cancel()
 
 			scheduler, err := New(
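
Replacing context.WithCancel with context.WithTimeout puts a hard bound on the poll loop introduced below: pods sit in the permit stage for the plugin's configured wait, so giving the context that same 100s means the test fails with a deadline error instead of hanging if the expected pods never appear.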
@@ -1032,17 +1035,28 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 			// Wait for all pods in waitSchedulingPods to be scheduled.
 			wg.Wait()
 
-			// Ensure that all waitingPods in scheduler can be obtained from any profiles.
-			for _, fwk := range scheduler.Profiles {
-				actualPodNamesInWaitingPods := sets.NewString()
-				fwk.IterateOverWaitingPods(func(pod framework.WaitingPod) {
-					actualPodNamesInWaitingPods.Insert(pod.GetPod().Name)
-				})
-				// Validate the name of pods in waitingPods matches expectations.
-				if actualPodNamesInWaitingPods.Len() != len(tc.expectPodNamesInWaitingPods) ||
-					!actualPodNamesInWaitingPods.HasAll(tc.expectPodNamesInWaitingPods...) {
-					t.Fatalf("Unexpected waitingPods in scheduler profile %s, expect: %#v, got: %#v", fwk.ProfileName(), tc.expectPodNamesInWaitingPods, actualPodNamesInWaitingPods.List())
-				}
-			}
+			// When the permit plugin emits its event, the pod may not have been added to
+			// the waitingPods pool yet, so poll with PollUntilContextCancel instead of
+			// asserting once.
+			var lastErr error
+			if err := wait.PollUntilContextCancel(ctx, 100*time.Microsecond, true, func(context.Context) (done bool, err error) {
+				// Ensure that all waitingPods in the scheduler can be obtained from every profile.
+				for _, fwk := range scheduler.Profiles {
+					actualPodNamesInWaitingPods := sets.NewString()
+					fwk.IterateOverWaitingPods(func(pod framework.WaitingPod) {
+						actualPodNamesInWaitingPods.Insert(pod.GetPod().Name)
+					})
+					// Validate that the names of the pods in waitingPods match expectations.
+					if actualPodNamesInWaitingPods.Len() != len(tc.expectPodNamesInWaitingPods) ||
+						!actualPodNamesInWaitingPods.HasAll(tc.expectPodNamesInWaitingPods...) {
+						// A non-nil error would end the poll immediately, so record the
+						// mismatch and keep polling.
+						lastErr = fmt.Errorf("unexpected waitingPods in scheduler profile %s, expect: %#v, got: %#v", fwk.ProfileName(), tc.expectPodNamesInWaitingPods, actualPodNamesInWaitingPods.List())
+						return false, nil
+					}
+				}
+				return true, nil
+			}); err != nil {
+				t.Fatalf("expected pods never appeared in waitingPods: %v; last mismatch: %v", err, lastErr)
+			}
 		})
 	}
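
For reference, a minimal, self-contained sketch (not part of the commit) of the PollUntilContextCancel pattern the test now relies on; the condition here is a stand-in for the waitingPods check, and it assumes k8s.io/apimachinery is on the module path:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Bound the whole wait with a deadline, as the test does with its 100s permit timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	start := time.Now()
	// Run the condition immediately (immediate=true), then every 100µs, until it
	// returns done=true, returns a non-nil error, or the context expires.
	err := wait.PollUntilContextCancel(ctx, 100*time.Microsecond, true,
		func(context.Context) (done bool, err error) {
			// Stand-in condition: the real test checks every profile's waitingPods
			// pool. Return (false, nil), not an error, to keep polling.
			return time.Since(start) > 50*time.Millisecond, nil
		})
	fmt.Println("poll finished, err =", err) // nil here: the condition holds well before the deadline
}

Note that returning a non-nil error from the condition ends the poll at once, which is why the test records mismatches and returns (false, nil) instead.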