Use fake clock in TestGetPodsToSync to fix flake.
commit 4cca5b2290
parent 492762d394
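Why this fixes the flake: the work queue previously constructed its own util.RealClock{}, and the test derived pod start times from unversioned.Now(), so whether a delay had "elapsed" by the time the test checked depended on real scheduling jitter. Injecting the clock lets the test advance time deterministically with clock.Step. Below is a self-contained sketch of the pattern with hypothetical names (Clock, workQueue); it is not the kubelet's code, only the shape of the change:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // Clock is the injection seam: production code passes a real clock,
    // tests pass a manually stepped fake. (Illustrative only; the real
    // code uses util.Clock and util.RealClock from pkg/util.)
    type Clock interface {
        Now() time.Time
    }

    // fakeClock only moves when the test says so.
    type fakeClock struct{ t time.Time }

    func (f *fakeClock) Now() time.Time       { return f.t }
    func (f *fakeClock) Step(d time.Duration) { f.t = f.t.Add(d) }

    // workQueue stores a ready-at timestamp per item, as basicWorkQueue does.
    type workQueue struct {
        clock Clock
        queue map[string]time.Time
    }

    func newWorkQueue(c Clock) *workQueue {
        return &workQueue{clock: c, queue: map[string]time.Time{}}
    }

    // Enqueue marks an item ready once the given delay has elapsed.
    func (q *workQueue) Enqueue(id string, delay time.Duration) {
        q.queue[id] = q.clock.Now().Add(delay)
    }

    // GetWork returns (and removes) every item whose ready time has passed.
    func (q *workQueue) GetWork() []string {
        var ready []string
        now := q.clock.Now()
        for id, ts := range q.queue {
            if !ts.After(now) {
                ready = append(ready, id)
                delete(q.queue, id)
            }
        }
        sort.Strings(ready) // deterministic order for the example output
        return ready
    }

    func main() {
        clock := &fakeClock{t: time.Unix(0, 0)}
        q := newWorkQueue(clock)
        q.Enqueue("a", 0)
        q.Enqueue("b", 30*time.Second)
        q.Enqueue("c", 2*time.Minute)

        // Advance time explicitly instead of sleeping: no CI jitter.
        clock.Step(1 * time.Minute)
        fmt.Println(q.GetWork()) // [a b]; "c" needs another minute
    }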
pkg/kubelet/kubelet.go

@@ -480,7 +480,7 @@ func NewMainKubelet(
 	}
 	klet.runtimeCache = runtimeCache
 	klet.reasonCache = NewReasonCache()
-	klet.workQueue = queue.NewBasicWorkQueue()
+	klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
 	klet.podWorkers = newPodWorkers(klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)

 	klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
pkg/kubelet/kubelet_test.go

@@ -194,7 +194,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 			api.ResourceMemory: resource.MustParse(testReservationMemory),
 		},
 	}
-	kubelet.workQueue = queue.NewBasicWorkQueue()
+	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
 	// Relist period does not affect the tests.
 	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{})
 	kubelet.clock = fakeClock
@@ -4418,16 +4418,12 @@ func TestExtractBandwidthResources(t *testing.T) {
 func TestGetPodsToSync(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
+	clock := testKubelet.fakeClock
 	pods := newTestPods(5)
-	podUIDs := []types.UID{}
-	for _, pod := range pods {
-		podUIDs = append(podUIDs, pod.UID)
-	}

 	exceededActiveDeadlineSeconds := int64(30)
 	notYetActiveDeadlineSeconds := int64(120)
-	now := unversioned.Now()
-	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
+	startTime := unversioned.NewTime(clock.Now())
 	pods[0].Status.StartTime = &startTime
 	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
 	pods[1].Status.StartTime = &startTime
@@ -4437,34 +4433,30 @@ func TestGetPodsToSync(t *testing.T) {

 	kubelet.podManager.SetPods(pods)
 	kubelet.workQueue.Enqueue(pods[2].UID, 0)
-	kubelet.workQueue.Enqueue(pods[3].UID, 0)
-	kubelet.workQueue.Enqueue(pods[4].UID, time.Hour)
+	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
+	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)

-	expectedPodsUID := []types.UID{pods[0].UID, pods[2].UID, pods[3].UID}
+	clock.Step(1 * time.Minute)
+
+	expectedPods := []*api.Pod{pods[0], pods[2], pods[3]}

 	podsToSync := kubelet.getPodsToSync()

-	if len(podsToSync) == len(expectedPodsUID) {
-		var rightNum int
-		for _, podUID := range expectedPodsUID {
-			for _, podToSync := range podsToSync {
-				if podToSync.UID == podUID {
-					rightNum++
+	if len(podsToSync) == len(expectedPods) {
+		for _, expect := range expectedPods {
+			var found bool
+			for _, got := range podsToSync {
+				if expect.UID == got.UID {
+					found = true
 					break
 				}
 			}
-		}
-		if rightNum != len(expectedPodsUID) {
-			// Just for report error
-			podsToSyncUID := []types.UID{}
-			for _, podToSync := range podsToSync {
-				podsToSyncUID = append(podsToSyncUID, podToSync.UID)
+			if !found {
+				t.Errorf("expected pod not found: %+v", expect)
 			}
-			t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID)
 		}
-
 	} else {
-		t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
+		t.Errorf("expected %d pods to sync, got %d", len(expectedPods), len(podsToSync))
 	}
 }
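With the fake clock, the expected set is fully determined by arithmetic rather than by real elapsed time: stepping one minute past startTime pushes pods[0] past its 30-second ActiveDeadlineSeconds (pods[1]'s 120-second deadline has not yet expired), and makes the 0-second and 30-second queue entries for pods[2] and pods[3] ready, while pods[4]'s two-minute entry stays queued.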
pkg/kubelet/pod_workers_test.go

@@ -29,6 +29,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util"
 )

 // fakePodWorkers runs sync pod function in serial, so we can have
@@ -82,7 +83,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {
 			return nil
 		},
 		fakeRecorder,
-		queue.NewBasicWorkQueue(),
+		queue.NewBasicWorkQueue(&util.RealClock{}),
 		time.Second,
 		time.Second,
 		fakeCache,
@@ -216,7 +217,7 @@ func TestFakePodWorkers(t *testing.T) {
 	kubeletForRealWorkers := &simpleFakeKubelet{}
 	kubeletForFakeWorkers := &simpleFakeKubelet{}

-	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(), time.Second, time.Second, fakeCache)
+	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&util.RealClock{}), time.Second, time.Second, fakeCache)
 	fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t}

 	tests := []struct {
pkg/kubelet/util/queue/work_queue.go

@@ -41,9 +41,9 @@ type basicWorkQueue struct {

 var _ WorkQueue = &basicWorkQueue{}

-func NewBasicWorkQueue() WorkQueue {
+func NewBasicWorkQueue(clock util.Clock) WorkQueue {
 	queue := make(map[types.UID]time.Time)
-	return &basicWorkQueue{queue: queue, clock: util.RealClock{}}
+	return &basicWorkQueue{queue: queue, clock: clock}
 }

 func (q *basicWorkQueue) GetWork() []types.UID {
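A note on the design choice: the clock is now a required constructor argument rather than a default baked into NewBasicWorkQueue, so every call site states its clock explicitly — util.RealClock{} in production and in the pod-worker tests, the shared fakeClock in the kubelet tests. Presumably GetWork compares each stored ready-at timestamp against clock.Now(), which is what makes the injected clock sufficient to control readiness in the test above.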