rename some declarations named context in tests

Mike Danese
2020-01-15 15:18:54 -08:00
parent 9701baea0f
commit d86fcd8c90
14 changed files with 512 additions and 512 deletions
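
Note: a local variable named context shadows Go's standard context package for the rest of the function it is declared in, which is presumably why these test-wide helper variables are renamed to testCtx. A minimal sketch of the pattern, assuming a hypothetical testContext struct standing in for the scheduler integration test helper (the real initTest also takes a *testing.T):

package example

import "context"

// testContext is a hypothetical stand-in for the integration test helper
// struct used in these tests (clientSet, informerFactory, ns, and so on).
type testContext struct{}

func initTest(name string) *testContext { return &testContext{} }

// before: the local variable named "context" shadows the imported context
// package, so context.Background() would not compile after its declaration.
func before() {
	context := initTest("unschedulable-nodes")
	_ = context
}

// after: renaming the variable to testCtx keeps the context package usable.
func after() context.Context {
	testCtx := initTest("unschedulable-nodes")
	_ = testCtx
	return context.Background()
}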


@@ -317,13 +317,13 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
}
func TestUnschedulableNodes(t *testing.T) {
context := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, context)
testCtx := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, testCtx)
nodeLister := context.informerFactory.Core().V1().Nodes().Lister()
nodeLister := testCtx.informerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
-defer context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
@@ -392,23 +392,23 @@ func TestUnschedulableNodes(t *testing.T) {
}
for i, mod := range nodeModifications {
-unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node)
+unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Apply the unschedulable modification to the node, and wait for the reflection
-mod.makeUnSchedulable(t, unSchedNode, nodeLister, context.clientSet)
+mod.makeUnSchedulable(t, unSchedNode, nodeLister, testCtx.clientSet)
// Create the new pod, note that this needs to happen post unschedulable
// modification or we have a race in the test.
-myPod, err := createPausePodWithResource(context.clientSet, "node-scheduling-test-pod", context.ns.Name, nil)
+myPod, err := createPausePodWithResource(testCtx.clientSet, "node-scheduling-test-pod", testCtx.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
// There are no schedulable nodes - the pod shouldn't be scheduled.
-err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second)
+err = waitForPodToScheduleWithTimeout(testCtx.clientSet, myPod, 2*time.Second)
if err == nil {
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
}
@@ -419,23 +419,23 @@ func TestUnschedulableNodes(t *testing.T) {
}
// Apply the schedulable modification to the node, and wait for the reflection
-schedNode, err := context.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
+schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get node: %v", err)
}
-mod.makeSchedulable(t, schedNode, nodeLister, context.clientSet)
+mod.makeSchedulable(t, schedNode, nodeLister, testCtx.clientSet)
// Wait until the pod is scheduled.
-if err := waitForPodToSchedule(context.clientSet, myPod); err != nil {
+if err := waitForPodToSchedule(testCtx.clientSet, myPod); err != nil {
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
} else {
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
// Clean up.
-if err := deletePod(context.clientSet, myPod.Name, myPod.Namespace); err != nil {
+if err := deletePod(testCtx.clientSet, myPod.Name, myPod.Namespace); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
-err = context.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
+err = testCtx.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
if err != nil {
t.Errorf("Failed to delete node: %v", err)
}
@@ -462,8 +462,8 @@ func TestMultiScheduler(t *testing.T) {
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
// 1. create and start default-scheduler
context := initTest(t, "multi-scheduler")
defer cleanupTest(t, context)
testCtx := initTest(t, "multi-scheduler")
defer cleanupTest(t, testCtx)
// 2. create a node
node := &v1.Node{
@@ -475,23 +475,23 @@ func TestMultiScheduler(t *testing.T) {
},
},
}
-context.clientSet.CoreV1().Nodes().Create(node)
+testCtx.clientSet.CoreV1().Nodes().Create(node)
// 3. create 3 pods for testing
t.Logf("create 3 pods for testing")
-testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)
+testPod, err := createPausePodWithResource(testCtx.clientSet, "pod-without-scheduler-name", testCtx.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
defaultScheduler := "default-scheduler"
-testPodFitsDefault, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: context.ns.Name, SchedulerName: defaultScheduler}))
+testPodFitsDefault, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: testCtx.ns.Name, SchedulerName: defaultScheduler}))
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
fooScheduler := "foo-scheduler"
-testPodFitsFoo, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: context.ns.Name, SchedulerName: fooScheduler}))
+testPodFitsFoo, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.ns.Name, SchedulerName: fooScheduler}))
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -500,30 +500,30 @@ func TestMultiScheduler(t *testing.T) {
// - testPod, testPodFitsDefault should be scheduled
// - testPodFitsFoo should NOT be scheduled
t.Logf("wait for pods scheduled")
-if err := waitForPodToSchedule(context.clientSet, testPod); err != nil {
+if err := waitForPodToSchedule(testCtx.clientSet, testPod); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
}
-if err := waitForPodToSchedule(context.clientSet, testPodFitsDefault); err != nil {
+if err := waitForPodToSchedule(testCtx.clientSet, testPodFitsDefault); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
}
-if err := waitForPodToScheduleWithTimeout(context.clientSet, testPodFitsFoo, time.Second*5); err == nil {
+if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testPodFitsFoo, time.Second*5); err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
}
// 5. create and start a scheduler with name "foo-scheduler"
-context = initTestSchedulerWithOptions(t, context, true, nil, time.Second, scheduler.WithName(fooScheduler))
+testCtx = initTestSchedulerWithOptions(t, testCtx, true, nil, time.Second, scheduler.WithName(fooScheduler))
// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
-err = waitForPodToSchedule(context.clientSet, testPodFitsFoo)
+err = waitForPodToSchedule(testCtx.clientSet, testPodFitsFoo)
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
} else {
@@ -531,10 +531,10 @@ func TestMultiScheduler(t *testing.T) {
}
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
-if err := deletePod(context.clientSet, testPod.Name, context.ns.Name); err != nil {
+if err := deletePod(testCtx.clientSet, testPod.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
-if err := deletePod(context.clientSet, testPodFitsDefault.Name, context.ns.Name); err != nil {
+if err := deletePod(testCtx.clientSet, testPodFitsDefault.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
@@ -579,8 +579,8 @@ func TestMultiScheduler(t *testing.T) {
// This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
func TestAllocatable(t *testing.T) {
context := initTest(t, "allocatable")
defer cleanupTest(t, context)
testCtx := initTest(t, "allocatable")
defer cleanupTest(t, testCtx)
// 2. create a node without allocatable awareness
nodeRes := &v1.ResourceList{
@@ -588,7 +588,7 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
}
-allocNode, err := createNode(context.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
+allocNode, err := createNode(testCtx.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -599,13 +599,13 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
}
-testAllocPod, err := createPausePodWithResource(context.clientSet, podName, context.ns.Name, podRes)
+testAllocPod, err := createPausePodWithResource(testCtx.clientSet, podName, testCtx.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
}
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
-err = waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod, time.Second*5)
+err = waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod, time.Second*5)
if err != nil {
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
} else {
@@ -626,23 +626,23 @@ func TestAllocatable(t *testing.T) {
},
}
-if _, err := context.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
+if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}
-if err := deletePod(context.clientSet, testAllocPod.Name, context.ns.Name); err != nil {
+if err := deletePod(testCtx.clientSet, testAllocPod.Name, testCtx.ns.Name); err != nil {
t.Fatalf("Failed to remove the first pod: %v", err)
}
// 6. Make another pod with different name, same resource request
podName2 := "pod-test-allocatable2"
-testAllocPod2, err := createPausePodWithResource(context.clientSet, podName2, context.ns.Name, podRes)
+testAllocPod2, err := createPausePodWithResource(testCtx.clientSet, podName2, testCtx.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
}
// 7. Test: this test pod should not be scheduled since it request more than Allocatable
-if err := waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod2, time.Second*5); err == nil {
+if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod2, time.Second*5); err == nil {
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
} else {
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
@@ -653,9 +653,9 @@ func TestAllocatable(t *testing.T) {
// pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "scheduler-informer")
defer cleanupTest(t, context)
cs := context.clientSet
testCtx := initTest(t, "scheduler-informer")
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -683,17 +683,17 @@ func TestSchedulerInformers(t *testing.T) {
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
existingPods: []*v1.Pod{
-initPausePod(context.clientSet, &pausePodConfig{
+initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod1",
-Namespace: context.ns.Name,
+Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "foo-scheduler",
}),
-initPausePod(context.clientSet, &pausePodConfig{
+initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod2",
-Namespace: context.ns.Name,
+Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
@@ -702,7 +702,7 @@ func TestSchedulerInformers(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "unschedulable-pod",
-Namespace: context.ns.Name,
+Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
}),
preemptedPodIndexes: map[int]struct{}{2: {}},
@@ -737,7 +737,7 @@ func TestSchedulerInformers(t *testing.T) {
// Cleanup
pods = append(pods, unschedulable)
cleanupPods(cs, t, pods)
-cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
+cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
}
}