Organize scheduler unit tests into subtests

drfish
2021-02-02 21:41:20 +08:00
parent 645c40fcf6
commit ebb6fb5b0c
4 changed files with 98 additions and 85 deletions
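
The change applies Go's standard subtest pattern: each case of a table-driven test is wrapped in t.Run with a generated name, so failures are reported per case and a single case can be re-run in isolation. A minimal sketch of the pattern under the same "case_N" naming used in this commit (the add function and TestAdd below are hypothetical illustrations, not code from this repository):

    package example

    import (
    	"fmt"
    	"testing"
    )

    // add is a hypothetical function under test, used only to illustrate the pattern.
    func add(a, b int) int { return a + b }

    func TestAdd(t *testing.T) {
    	tests := []struct {
    		a, b, want int
    	}{
    		{1, 2, 3},
    		{0, 0, 0},
    		{-1, 1, 0},
    	}
    	for i, tt := range tests {
    		// Index-based names such as "case_0" mirror this commit's naming;
    		// descriptive names work just as well.
    		t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
    			if got := add(tt.a, tt.b); got != tt.want {
    				t.Errorf("add(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
    			}
    		})
    	}
    }

With subtests in place, an individual case can be targeted from the command line, for example: go test -run 'TestAdd/case_2'.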


@@ -394,32 +394,34 @@ func TestDump(t *testing.T) {
podsToAdd: []*v1.Pod{testPods[0]},
}}
-	for _, tt := range tests {
-		cache := newSchedulerCache(ttl, time.Second, nil)
-		for _, podToAssume := range tt.podsToAssume {
-			if err := assumeAndFinishBinding(cache, podToAssume, now); err != nil {
-				t.Errorf("assumePod failed: %v", err)
-			}
-		}
-		for _, podToAdd := range tt.podsToAdd {
-			if err := cache.AddPod(podToAdd); err != nil {
-				t.Errorf("AddPod failed: %v", err)
-			}
-		}
-		snapshot := cache.Dump()
-		if len(snapshot.Nodes) != len(cache.nodes) {
-			t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
-		}
-		for name, ni := range snapshot.Nodes {
-			nItem := cache.nodes[name]
-			if !reflect.DeepEqual(ni, nItem.info) {
-				t.Errorf("expect \n%+v; got \n%+v", nItem.info, ni)
-			}
-		}
-		if !reflect.DeepEqual(snapshot.AssumedPods, cache.assumedPods) {
-			t.Errorf("expect \n%+v; got \n%+v", cache.assumedPods, snapshot.AssumedPods)
-		}
+	for i, tt := range tests {
+		t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+			cache := newSchedulerCache(ttl, time.Second, nil)
+			for _, podToAssume := range tt.podsToAssume {
+				if err := assumeAndFinishBinding(cache, podToAssume, now); err != nil {
+					t.Errorf("assumePod failed: %v", err)
+				}
+			}
+			for _, podToAdd := range tt.podsToAdd {
+				if err := cache.AddPod(podToAdd); err != nil {
+					t.Errorf("AddPod failed: %v", err)
+				}
+			}
+			snapshot := cache.Dump()
+			if len(snapshot.Nodes) != len(cache.nodes) {
+				t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
+			}
+			for name, ni := range snapshot.Nodes {
+				nItem := cache.nodes[name]
+				if !reflect.DeepEqual(ni, nItem.info) {
+					t.Errorf("expect \n%+v; got \n%+v", nItem.info, ni)
+				}
+			}
+			if !reflect.DeepEqual(snapshot.AssumedPods, cache.assumedPods) {
+				t.Errorf("expect \n%+v; got \n%+v", cache.assumedPods, snapshot.AssumedPods)
+			}
+		})
}
}
@@ -647,26 +649,28 @@ func TestUpdatePodAndGet(t *testing.T) {
},
}
-	for _, tt := range tests {
-		cache := newSchedulerCache(ttl, time.Second, nil)
-		if err := tt.handler(cache, tt.pod); err != nil {
-			t.Fatalf("unexpected err: %v", err)
-		}
-		if !tt.assumePod {
-			if err := cache.UpdatePod(tt.pod, tt.podToUpdate); err != nil {
-				t.Fatalf("UpdatePod failed: %v", err)
-			}
-		}
-		cachedPod, err := cache.GetPod(tt.pod)
-		if err != nil {
-			t.Fatalf("GetPod failed: %v", err)
-		}
-		if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
-			t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
-		}
+	for i, tt := range tests {
+		t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+			cache := newSchedulerCache(ttl, time.Second, nil)
+			if err := tt.handler(cache, tt.pod); err != nil {
+				t.Fatalf("unexpected err: %v", err)
+			}
+			if !tt.assumePod {
+				if err := cache.UpdatePod(tt.pod, tt.podToUpdate); err != nil {
+					t.Fatalf("UpdatePod failed: %v", err)
+				}
+			}
+			cachedPod, err := cache.GetPod(tt.pod)
+			if err != nil {
+				t.Fatalf("GetPod failed: %v", err)
+			}
+			if !reflect.DeepEqual(tt.podToUpdate, cachedPod) {
+				t.Fatalf("pod get=%s, want=%s", cachedPod, tt.podToUpdate)
+			}
+		})
}
}