Create PredicateFailureReason, modify scheduler predicate interface.

Author: mksalawa
Date:   2016-08-09 14:01:46 +02:00
parent 899d98ad15
commit 2749ec7555

10 changed files with 530 additions and 371 deletions
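
For orientation, a minimal sketch of the interface this commit introduces, reconstructed from how the tests below use it. The method name GetReason and the exact signature are assumptions inferred from the diff, not copied from it:

// Sketch only: inferred from test usage in this commit.
// PredicateFailureReason describes why a predicate rejected a pod.
type PredicateFailureReason interface {
	GetReason() string // assumed method; something must render values like ErrPodNotMatchHostName as text
}

// Assumed new predicate shape: scheduling failures travel in the reasons
// slice, while the error return is reserved for real errors.
type FitPredicate func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)

Every test loop below follows this split: it fails the test on a non-nil err, then compares reasons against the expected slice with reflect.DeepEqual.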


@@ -125,7 +125,7 @@ func TestPodFitsResources(t *testing.T) {
 nodeInfo *schedulercache.NodeInfo
 fits bool
 test string
-wErr error
+reasons []algorithm.PredicateFailureReason
 }{
 {
 pod: &api.Pod{},
@@ -133,7 +133,6 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
 fits: true,
 test: "no resources requested always fits",
-wErr: nil,
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
@@ -141,39 +140,42 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
 fits: false,
 test: "too many resources fails",
-wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
+reasons: []algorithm.PredicateFailureReason{
+NewInsufficientResourceError(api.ResourceCPU, 1, 10, 10),
+NewInsufficientResourceError(api.ResourceMemory, 1, 20, 20),
+},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
-fits: false,
-test: "too many resources fails due to init container cpu",
-wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+fits: false,
+test: "too many resources fails due to init container cpu",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 3, 8, 10)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
-fits: false,
-test: "too many resources fails due to highest init container cpu",
-wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+fits: false,
+test: "too many resources fails due to highest init container cpu",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 3, 8, 10)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
-fits: false,
-test: "too many resources fails due to init container memory",
-wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+fits: false,
+test: "too many resources fails due to init container memory",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 3, 19, 20)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
-fits: false,
-test: "too many resources fails due to highest init container memory",
-wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+fits: false,
+test: "too many resources fails due to highest init container memory",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 3, 19, 20)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}),
@@ -181,7 +183,6 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 fits: true,
 test: "init container fits because it's the max, not sum, of containers and init containers",
-wErr: nil,
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}, schedulercache.Resource{MilliCPU: 1, Memory: 1}),
@@ -189,7 +190,6 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 fits: true,
 test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
-wErr: nil,
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
@@ -197,23 +197,22 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
 fits: true,
 test: "both resources fit",
-wErr: nil,
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})),
-fits: false,
-test: "one resource memory fits",
-wErr: newInsufficientResourceError(cpuResourceName, 2, 9, 10),
+fits: false,
+test: "one resource memory fits",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 2, 9, 10)},
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
-fits: false,
-test: "one resource cpu fits",
-wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
+fits: false,
+test: "one resource cpu fits",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 2, 19, 20)},
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
@@ -221,7 +220,6 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 fits: true,
 test: "equal edge case",
-wErr: nil,
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 4, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
@@ -229,7 +227,6 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 fits: true,
 test: "equal edge case for init container",
-wErr: nil,
 },
 }
@@ -237,9 +234,12 @@ func TestPodFitsResources(t *testing.T) {
 node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)}}
 test.nodeInfo.SetNode(&node)
-fits, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, test.wErr) {
-t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
+fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, test.reasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, test.reasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -251,48 +251,51 @@ func TestPodFitsResources(t *testing.T) {
 nodeInfo *schedulercache.NodeInfo
 fits bool
 test string
-wErr error
+reasons []algorithm.PredicateFailureReason
 }{
 {
 pod: &api.Pod{},
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
-fits: false,
-test: "even without specified resources predicate fails when there's no space for additional pod",
-wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+fits: false,
+test: "even without specified resources predicate fails when there's no space for additional pod",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
-fits: false,
-test: "even if both resources fit predicate fails when there's no space for additional pod",
-wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+fits: false,
+test: "even if both resources fit predicate fails when there's no space for additional pod",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
-fits: false,
-test: "even for equal edge case predicate fails when there's no space for additional pod",
-wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+fits: false,
+test: "even for equal edge case predicate fails when there's no space for additional pod",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
-fits: false,
-test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
-wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+fits: false,
+test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)},
 },
 }
 for _, test := range notEnoughPodsTests {
 node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}
 test.nodeInfo.SetNode(&node)
-fits, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, test.wErr) {
-t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
+fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, test.reasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, test.reasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -342,19 +345,20 @@ func TestPodFitsHost(t *testing.T) {
 test: "host doesn't match",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}
 for _, test := range tests {
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(test.node)
-result, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
-if !reflect.DeepEqual(err, ErrPodNotMatchHostName) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if result == false && !reflect.DeepEqual(err, ErrPodNotMatchHostName) {
-t.Errorf("unexpected error: %v", err)
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
-if result != test.fits {
-t.Errorf("unexpected difference for %s: expected: %v got %v", test.test, test.fits, result)
+if fits != test.fits {
+t.Errorf("%s: unexpected difference: expected: %v got %v", test.test, test.fits, fits)
 }
 }
 }
@@ -418,13 +422,15 @@ func TestPodFitsHostPorts(t *testing.T) {
 test: "second port",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}
 for _, test := range tests {
-fits, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, ErrPodNotFitsHostPorts) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if fits == false && !reflect.DeepEqual(err, ErrPodNotFitsHostPorts) {
-t.Errorf("unexpected error: %v", err)
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if test.fits != fits {
 t.Errorf("%s: expected %v, saw %v", test.test, test.fits, fits)
@@ -463,7 +469,7 @@ func TestGetUsedPorts(t *testing.T) {
 for _, test := range tests {
 ports := getUsedPorts(test.pods...)
 if !reflect.DeepEqual(test.ports, ports) {
-t.Errorf("expect %v, got %v", test.ports, ports)
+t.Errorf("%s: expected %v, got %v", "test get used ports", test.ports, ports)
 }
 }
 }
@@ -502,20 +508,21 @@ func TestDiskConflicts(t *testing.T) {
 {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"},
 {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"},
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
 for _, test := range tests {
-ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
-t.Errorf("unexpected error: %v", err)
+ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if ok == false && !reflect.DeepEqual(err, ErrDiskConflict) {
-t.Errorf("unexpected error: %v", err)
+if !ok && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if test.isOk && !ok {
-t.Errorf("expected ok, got none. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected ok, got none. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 if !test.isOk && ok {
-t.Errorf("expected no ok, got one. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected no ok, got one. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 }
 }
@@ -554,20 +561,21 @@ func TestAWSDiskConflicts(t *testing.T) {
 {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"},
 {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"},
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
 for _, test := range tests {
-ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
-t.Errorf("unexpected error: %v", err)
+ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if ok == false && !reflect.DeepEqual(err, ErrDiskConflict) {
-t.Errorf("unexpected error: %v", err)
+if !ok && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if test.isOk && !ok {
-t.Errorf("expected ok, got none. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected ok, got none. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 if !test.isOk && ok {
-t.Errorf("expected no ok, got one. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected no ok, got one. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 }
 }
@@ -612,20 +620,21 @@ func TestRBDDiskConflicts(t *testing.T) {
 {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"},
 {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"},
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
 for _, test := range tests {
-ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
-t.Errorf("unexpected error: %v", err)
+ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if ok == false && !reflect.DeepEqual(err, ErrDiskConflict) {
-t.Errorf("unexpected error: %v", err)
+if !ok && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if test.isOk && !ok {
-t.Errorf("expected ok, got none. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected ok, got none. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 if !test.isOk && ok {
-t.Errorf("expected no ok, got one. %v %s %s", test.pod, test.nodeInfo, test.test)
+t.Errorf("%s: expected no ok, got one. %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
 }
 }
 }
@@ -1087,18 +1096,19 @@ func TestPodFitsSelector(t *testing.T) {
 "is not satisfied, won't schedule onto the node",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
 for _, test := range tests {
 node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}}
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(&node)
-fits, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
-if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if fits == false && !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) {
-t.Errorf("unexpected error: %v", err)
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -1152,18 +1162,20 @@ func TestNodeLabelPresence(t *testing.T) {
 test: "all labels match, presence false",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}
 for _, test := range tests {
 node := api.Node{ObjectMeta: api.ObjectMeta{Labels: label}}
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(&node)
 labelChecker := NodeLabelChecker{test.labels, test.presence}
-fits, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
-if !reflect.DeepEqual(err, ErrNodeLabelPresenceViolated) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if fits == false && !reflect.DeepEqual(err, ErrNodeLabelPresenceViolated) {
-t.Errorf("unexpected error: %v", err)
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -1297,18 +1309,19 @@ func TestServiceAffinity(t *testing.T) {
 test: "service pod on different node, multiple labels, all match",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}
 for _, test := range tests {
 nodes := []api.Node{node1, node2, node3, node4, node5}
 serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(test.node)
-fits, err := serviceAffinity.CheckServiceAffinity(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
-if !reflect.DeepEqual(err, ErrServiceAffinityViolated) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := serviceAffinity.CheckServiceAffinity(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
-if fits == false && !reflect.DeepEqual(err, ErrServiceAffinityViolated) {
-t.Errorf("unexpected error: %v", err)
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
 }
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -1581,14 +1594,17 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
 return "", false
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
 for _, test := range tests {
 pred := NewMaxPDVolumeCountPredicate(filter, test.maxVols, pvInfo, pvcInfo)
-fits, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
-if err != nil && !reflect.DeepEqual(err, ErrMaxVolumeCountExceeded) {
-t.Errorf("unexpected error: %v", err)
+fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected %v, got %v", test.test, test.fits, fits)
 }
@@ -1647,7 +1663,7 @@ func TestPredicatesRegistered(t *testing.T) {
 // Check if all public predicates are referenced in target files.
 for _, function := range functions {
-// Ignore functions that doesn't match FitPredicate signature.
+// Ignore functions that don't match FitPredicate signature.
 signature := function.Underlying.Signature
 if len(predSignature.Parameters) != len(signature.Parameters) {
 continue
@@ -1697,6 +1713,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 fits bool
 test string
 wErr error
+reasons []algorithm.PredicateFailureReason
 }{
 {
 pod: &api.Pod{},
@@ -1719,8 +1736,12 @@ func TestRunGeneralPredicates(t *testing.T) {
 Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
 },
 fits: false,
-wErr: newInsufficientResourceError("CPU", 8, 5, 10),
-test: "not enough cpu resource",
+wErr: nil,
+reasons: []algorithm.PredicateFailureReason{
+NewInsufficientResourceError(api.ResourceCPU, 8, 5, 10),
+NewInsufficientResourceError(api.ResourceMemory, 10, 19, 20),
+},
+test: "not enough cpu and memory resource",
 },
 {
 pod: &api.Pod{},
@@ -1735,10 +1756,11 @@ func TestRunGeneralPredicates(t *testing.T) {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
 nodeInfo: schedulercache.NewNodeInfo(
 newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 1})),
-node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
-fits: false,
-wErr: newInsufficientResourceError("NvidiaGpu", 1, 1, 1),
-test: "not enough GPU resource",
+node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
+fits: false,
+wErr: nil,
+reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceNvidiaGPU, 1, 1, 1)},
+test: "not enough GPU resource",
 },
 {
 pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
@@ -1760,9 +1782,10 @@ func TestRunGeneralPredicates(t *testing.T) {
 ObjectMeta: api.ObjectMeta{Name: "machine1"},
 Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
 },
-fits: false,
-wErr: ErrPodNotMatchHostName,
-test: "host not match",
+fits: false,
+wErr: nil,
+reasons: []algorithm.PredicateFailureReason{ErrPodNotMatchHostName},
+test: "host not match",
 },
 {
 pod: newPodWithPort(123),
@@ -1771,16 +1794,20 @@ func TestRunGeneralPredicates(t *testing.T) {
 ObjectMeta: api.ObjectMeta{Name: "machine1"},
 Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
 },
-fits: false,
-wErr: ErrPodNotFitsHostPorts,
-test: "hostport conflict",
+fits: false,
+wErr: nil,
+reasons: []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts},
+test: "hostport conflict",
 },
 }
 for _, test := range resourceTests {
 test.nodeInfo.SetNode(test.node)
-fits, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
-if !reflect.DeepEqual(err, test.wErr) {
-t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
+fits, reasons, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, test.reasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, test.reasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
@@ -2310,6 +2337,8 @@ func TestInterPodAffinity(t *testing.T) {
 test: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. satisfy PodAntiAffinity symmetry with the existing pod",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}
 for _, test := range tests {
 node := test.node
 var podsOnNode []*api.Pod
@@ -2327,10 +2356,13 @@ func TestInterPodAffinity(t *testing.T) {
 nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
 nodeInfo.SetNode(test.node)
 nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo}
-fits, err := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
-if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
+fits, reasons, err := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
+if err != nil {
 t.Errorf("%s: unexpected error %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected %v got %v", test.test, test.fits, fits)
 }
@@ -2476,6 +2508,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 "should not be blocked from being scheduled onto any node, even there's no existing pod that match the rule anywhere.",
 },
 }
+affinityExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}
+selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
 for _, test := range tests {
 nodeListInfo := FakeNodeListInfo(test.nodes)
 for _, node := range test.nodes {
@@ -2494,21 +2529,27 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
 nodeInfo.SetNode(&node)
 nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo}
-fits, err := testFit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
-if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
+fits, reasons, err := testFit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
+if err != nil {
 t.Errorf("%s: unexpected error %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, affinityExpectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v", test.test, reasons)
+}
 affinity, err := api.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations)
 if err != nil {
-t.Errorf("unexpected error: %v", err)
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
 if affinity != nil && affinity.NodeAffinity != nil {
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(&node)
 nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo}
-fits2, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
-if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
-t.Errorf("unexpected error: %v", err)
+fits2, reasons, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.test, err)
 }
+if !fits2 && !reflect.DeepEqual(reasons, selectorExpectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, selectorExpectedFailureReasons)
+}
 fits = fits && fits2
 }
@@ -2788,14 +2829,18 @@ func TestPodToleratesTaints(t *testing.T) {
 "but the effect of taint on node is PreferNoSchedule. Pod can be scheduled onto the node",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}
 for _, test := range podTolerateTaintsTests {
 nodeInfo := schedulercache.NewNodeInfo()
 nodeInfo.SetNode(&test.node)
-fits, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
-if fits == false && !reflect.DeepEqual(err, ErrTaintsTolerationsNotMatch) {
+fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+if err != nil {
 t.Errorf("%s, unexpected error: %v", test.test, err)
 }
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s, unexpected failure reason: %v, want: %v", test.test, reasons, expectedFailureReasons)
+}
 if fits != test.fits {
 t.Errorf("%s, expected: %v got %v", test.test, test.fits, fits)
 }
@@ -2896,17 +2941,19 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
 name: "non best-effort pod schedulable on node without memory pressure condition on",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}
 for _, test := range tests {
-fits, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.name, err)
+}
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.name, reasons, expectedFailureReasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
 }
-if err != nil && err != ErrNodeUnderMemoryPressure {
-t.Errorf("%s: unexpected error: %v", test.name, err)
-continue
-}
 }
 }
@@ -2966,15 +3013,18 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
 name: "pod not schedulable on node with pressure condition on",
 },
 }
+expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}
 for _, test := range tests {
-fits, err := CheckNodeDiskPressurePredicate(test.pod, nil, test.nodeInfo)
+fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+if err != nil {
+t.Errorf("%s: unexpected error: %v", test.name, err)
+}
+if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.name, reasons, expectedFailureReasons)
+}
 if fits != test.fits {
 t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
 }
-if err != nil && err != ErrNodeUnderDiskPressure {
-t.Errorf("%s: unexpected error: %v", test.name, err)
-}
 }
 }
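
Across all of these loops the assertion pattern is identical: err must be nil, and when the predicate reports no fit, reasons must deep-equal the expected slice. A hypothetical consumer, not part of this diff, could fold the slice back into one message for logs or scheduling events:

// Hypothetical helper, not from this commit: collapse failure reasons
// into a single string, e.g. for a FailedScheduling event message.
// Assumes "strings" is imported alongside the algorithm package.
func failureMessage(reasons []algorithm.PredicateFailureReason) string {
	msgs := make([]string, 0, len(reasons))
	for _, r := range reasons {
		msgs = append(msgs, r.GetReason())
	}
	return strings.Join(msgs, ", ")
}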