Use fake clock in TestGetPodsToSync to fix flake.

commit 4cca5b2290
parent 492762d394
Author: Random-Liu
Committed by: Random-Liu
Date: 2016-04-29 01:39:46 +00:00

4 changed files with 23 additions and 30 deletions
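Background on the pattern the diffs below apply: the flake came from TestGetPodsToSync measuring pod deadlines and work-queue timestamps against the real wall clock, so the result could depend on scheduling delays. Injecting a clock interface lets the test freeze time and advance it explicitly. A minimal, self-contained sketch of the idea in Go — Clock, RealClock, FakeClock, and dueAfter here are simplified stand-ins for illustration, not the actual pkg/util definitions:

package main

import (
	"fmt"
	"time"
)

// Clock abstracts "what time is it?" so tests can control the answer.
// Simplified stand-in for the util.Clock interface threaded into the queue.
type Clock interface {
	Now() time.Time
}

// RealClock reads the wall clock; this is what production code injects.
type RealClock struct{}

func (RealClock) Now() time.Time { return time.Now() }

// FakeClock sits at a fixed instant and moves only when Step is called,
// making "has this deadline passed?" checks fully deterministic.
type FakeClock struct{ t time.Time }

func (f *FakeClock) Now() time.Time       { return f.t }
func (f *FakeClock) Step(d time.Duration) { f.t = f.t.Add(d) }

// dueAfter reports whether a delay that started at start has elapsed
// according to the injected clock (hypothetical helper for illustration).
func dueAfter(c Clock, start time.Time, delay time.Duration) bool {
	return !c.Now().Before(start.Add(delay))
}

func main() {
	clock := &FakeClock{t: time.Unix(0, 0)}
	start := clock.Now()
	clock.Step(1 * time.Minute) // no real time passes
	fmt.Println(dueAfter(clock, start, 30*time.Second)) // true: 30s mark already passed at t=1m
	fmt.Println(dueAfter(clock, start, 2*time.Minute))  // false: 2m mark not yet reached
}

Production code keeps the real clock; only tests substitute the fake one, which is exactly the substitution the hunks below make for the kubelet work queue.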

pkg/kubelet/kubelet.go

@@ -480,7 +480,7 @@ func NewMainKubelet(
 	}
 	klet.runtimeCache = runtimeCache
 	klet.reasonCache = NewReasonCache()
-	klet.workQueue = queue.NewBasicWorkQueue()
+	klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
 	klet.podWorkers = newPodWorkers(klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)
 	klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)

pkg/kubelet/kubelet_test.go

@@ -194,7 +194,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 			api.ResourceMemory: resource.MustParse(testReservationMemory),
 		},
 	}
-	kubelet.workQueue = queue.NewBasicWorkQueue()
+	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
 	// Relist period does not affect the tests.
 	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{})
 	kubelet.clock = fakeClock
@@ -4418,16 +4418,12 @@ func TestExtractBandwidthResources(t *testing.T) {
 func TestGetPodsToSync(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
+	clock := testKubelet.fakeClock
 	pods := newTestPods(5)
-	podUIDs := []types.UID{}
-	for _, pod := range pods {
-		podUIDs = append(podUIDs, pod.UID)
-	}
 	exceededActiveDeadlineSeconds := int64(30)
 	notYetActiveDeadlineSeconds := int64(120)
-	now := unversioned.Now()
-	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
+	startTime := unversioned.NewTime(clock.Now())
 	pods[0].Status.StartTime = &startTime
 	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
 	pods[1].Status.StartTime = &startTime
@@ -4437,34 +4433,30 @@ func TestGetPodsToSync(t *testing.T) {
 	kubelet.podManager.SetPods(pods)
 	kubelet.workQueue.Enqueue(pods[2].UID, 0)
-	kubelet.workQueue.Enqueue(pods[3].UID, 0)
-	kubelet.workQueue.Enqueue(pods[4].UID, time.Hour)
-	expectedPodsUID := []types.UID{pods[0].UID, pods[2].UID, pods[3].UID}
+	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
+	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
+	clock.Step(1 * time.Minute)
+	expectedPods := []*api.Pod{pods[0], pods[2], pods[3]}
 	podsToSync := kubelet.getPodsToSync()
-	if len(podsToSync) == len(expectedPodsUID) {
-		var rightNum int
-		for _, podUID := range expectedPodsUID {
-			for _, podToSync := range podsToSync {
-				if podToSync.UID == podUID {
-					rightNum++
+	if len(podsToSync) == len(expectedPods) {
+		for _, expect := range expectedPods {
+			var found bool
+			for _, got := range podsToSync {
+				if expect.UID == got.UID {
+					found = true
 					break
 				}
 			}
-		}
-		if rightNum != len(expectedPodsUID) {
-			// Just for report error
-			podsToSyncUID := []types.UID{}
-			for _, podToSync := range podsToSync {
-				podsToSyncUID = append(podsToSyncUID, podToSync.UID)
+			if !found {
+				t.Errorf("expected pod not found: %+v", expect)
 			}
-			t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID)
 		}
 	} else {
-		t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
+		t.Errorf("expected %d pods to sync, got %d", len(expectedPods), len(podsToSync))
 	}
 }
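Reading the timing arithmetic off the rewritten test: every pod's StartTime is the fake clock's starting instant, and pods[2], pods[3], and pods[4] are enqueued with delays of 0, 30 seconds, and 2 minutes. After clock.Step(1 * time.Minute), pods[2] and pods[3] are past their queue times while pods[4] is not, and pods[0] has outlived its 30-second ActiveDeadlineSeconds while pods[1]'s 120-second deadline still holds. So exactly pods[0], pods[2], and pods[3] are expected to sync, with no real time elapsing at all.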

pkg/kubelet/pod_workers_test.go

@@ -29,6 +29,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util"
 )
 
 // fakePodWorkers runs sync pod function in serial, so we can have
@@ -82,7 +83,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {
 			return nil
 		},
 		fakeRecorder,
-		queue.NewBasicWorkQueue(),
+		queue.NewBasicWorkQueue(&util.RealClock{}),
 		time.Second,
 		time.Second,
 		fakeCache,
@@ -216,7 +217,7 @@ func TestFakePodWorkers(t *testing.T) {
 	kubeletForRealWorkers := &simpleFakeKubelet{}
 	kubeletForFakeWorkers := &simpleFakeKubelet{}
-	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(), time.Second, time.Second, fakeCache)
+	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&util.RealClock{}), time.Second, time.Second, fakeCache)
 	fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t}
 	tests := []struct {

pkg/kubelet/util/queue/work_queue.go

@@ -41,9 +41,9 @@ type basicWorkQueue struct {
 var _ WorkQueue = &basicWorkQueue{}
 
-func NewBasicWorkQueue() WorkQueue {
+func NewBasicWorkQueue(clock util.Clock) WorkQueue {
 	queue := make(map[types.UID]time.Time)
-	return &basicWorkQueue{queue: queue, clock: util.RealClock{}}
+	return &basicWorkQueue{queue: queue, clock: clock}
 }
 
 func (q *basicWorkQueue) GetWork() []types.UID {
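The last hunk stops at the GetWork signature. For orientation, a sketch of how such a queue body would use the injected clock, assuming the struct also carries a sync.Mutex guarding the map — an assumed shape for illustration, not necessarily the exact upstream body:

// Sketch: return and remove every UID whose scheduled time is not after
// clock.Now(). With a FakeClock injected, Step() alone decides which items
// are due, so tests never race the wall clock.
// (Assumed implementation; the q.lock field is an assumption.)
func (q *basicWorkQueue) GetWork() []types.UID {
	q.lock.Lock()
	defer q.lock.Unlock()
	now := q.clock.Now()
	var items []types.UID
	for uid, ts := range q.queue {
		if !ts.After(now) {
			items = append(items, uid)
			delete(q.queue, uid)
		}
	}
	return items
}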