refactor scheduler extender-related API

- move extender-related API from pkg/scheduler/api to pkg/scheduler/apis/extender/v1

- alias extenderv1 to pkg/scheduler/apis/extender/v1

- use NodeScore and NodeScoreList in non-extender logic
Wei Huang
2019-09-27 11:23:29 -07:00
parent 2ebcd2509c
commit cbdb4e3fdb
46 changed files with 703 additions and 1390 deletions
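
The core of this refactor is replacing the scheduler's internal score types: schedulerapi.HostPriority and HostPriorityList become framework.NodeScore and NodeScoreList, with the Host field renamed to Name. For reference while reading the hunks below, the framework types have roughly the following shape; this is a minimal sketch based on the fields used in this diff, not the authoritative definition in pkg/scheduler/framework/v1alpha1.

    package framework

    // NodeScore is the score a single priority function (or plugin) assigns to one node.
    // Name replaces the old HostPriority.Host field.
    type NodeScore struct {
        Name  string
        Score int64
    }

    // NodeScoreList holds one NodeScore per node that was evaluated.
    type NodeScoreList []NodeScore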

View File

@@ -27,6 +27,7 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -215,7 +216,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -232,7 +233,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "nothing scheduled, nothing requested",
},
{
@@ -249,7 +250,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
@@ -266,7 +267,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -289,7 +290,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 6}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -312,7 +313,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 9}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -333,7 +334,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 6}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -354,7 +355,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -364,7 +365,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -388,7 +389,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
},
nodes: []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine3", Score: 8}, {Host: "machine4", Score: 9}},
expectedList: []framework.NodeScore{{Name: "machine3", Score: 8}, {Name: "machine4", Score: 9}},
name: "Include volume count on a node for balanced resource allocation",
pods: []*v1.Pod{
{Spec: cpuAndMemory3},

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -82,8 +83,8 @@ func (t *topologySpreadConstraintsMap) initialize(pod *v1.Pod, nodes []*v1.Node)
// Note: Symmetry is not applicable. We only weigh how incomingPod matches existingPod.
// Whether existingPod matches incomingPod doesn't contribute to the final score.
// This is different from the Affinity API.
func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := make(schedulerapi.HostPriorityList, len(nodes))
func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
result := make(framework.NodeScoreList, len(nodes))
// return if incoming pod doesn't have soft topology spread constraints.
constraints := getSoftTopologySpreadConstraints(pod)
if len(constraints) == 0 {
@@ -171,7 +172,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
maxMinDiff := total - minCount
for i := range nodes {
node := nodes[i]
result[i].Host = node.Name
result[i].Name = node.Name
// debugging purpose: print the value for each node
// score must be pointer here, otherwise it's always 0

View File

@@ -21,7 +21,7 @@ import (
"testing"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -101,7 +101,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
existingPods []*v1.Pod
nodes []*v1.Node
failedNodes []*v1.Node // nodes + failedNodes = all nodes
want schedulerapi.HostPriorityList
want framework.NodeScoreList
}{
// Explanation on the Legend:
// a) X/Y means there are X matching pods on node1 and Y on node2, both nodes are candidates
@@ -120,9 +120,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-a").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 10},
{Host: "node-b", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 10},
{Name: "node-b", Score: 10},
},
},
{
@@ -142,8 +142,8 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 10},
},
},
{
@@ -159,9 +159,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-a").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 10},
{Host: "node-b", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 10},
{Name: "node-b", Score: 10},
},
},
{
@@ -187,11 +187,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-d").Label("node", "node-d").Obj(),
},
failedNodes: []*v1.Node{},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 6},
{Host: "node-b", Score: 8},
{Host: "node-c", Score: 10},
{Host: "node-d", Score: 5},
want: []framework.NodeScore{
{Name: "node-a", Score: 6},
{Name: "node-b", Score: 8},
{Name: "node-c", Score: 10},
{Name: "node-d", Score: 5},
},
},
{
@@ -222,10 +222,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("node", "node-y").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 5},
{Host: "node-b", Score: 8},
{Host: "node-x", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 5},
{Name: "node-b", Score: 8},
{Name: "node-x", Score: 10},
},
},
{
@@ -256,10 +256,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("node", "node-y").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 2},
{Host: "node-b", Score: 0},
{Host: "node-x", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 2},
{Name: "node-b", Score: 0},
{Name: "node-x", Score: 10},
},
},
{
@@ -290,10 +290,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 8},
{Host: "node-b", Score: 8},
{Host: "node-x", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 8},
{Name: "node-b", Score: 8},
{Name: "node-x", Score: 10},
},
},
{
@@ -324,9 +324,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 10},
{Host: "node-x", Score: 6},
want: []framework.NodeScore{
{Name: "node-a", Score: 10},
{Name: "node-x", Score: 6},
},
},
{
@@ -361,11 +361,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
failedNodes: []*v1.Node{},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 8},
{Host: "node-b", Score: 7},
{Host: "node-x", Score: 10},
{Host: "node-y", Score: 8},
want: []framework.NodeScore{
{Name: "node-a", Score: 8},
{Name: "node-b", Score: 7},
{Name: "node-x", Score: 10},
{Name: "node-y", Score: 8},
},
},
{
@@ -391,11 +391,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
failedNodes: []*v1.Node{},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 10},
{Host: "node-b", Score: 8},
{Host: "node-x", Score: 6},
{Host: "node-y", Score: 5},
want: []framework.NodeScore{
{Name: "node-a", Score: 10},
{Name: "node-b", Score: 8},
{Name: "node-x", Score: 6},
{Name: "node-y", Score: 5},
},
},
{
@@ -423,10 +423,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
failedNodes: []*v1.Node{
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
want: []schedulerapi.HostPriority{
{Host: "node-a", Score: 8},
{Host: "node-b", Score: 6},
{Host: "node-x", Score: 10},
want: []framework.NodeScore{
{Name: "node-a", Score: 8},
{Name: "node-b", Score: 6},
{Name: "node-x", Score: 10},
},
},
}

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers"
)
@@ -39,10 +40,10 @@ const (
// based on the total size of those images.
// - If none of the images are present, this node will be given the lowest priority.
// - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
var score int
@@ -53,8 +54,8 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
score = 0
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(score),
}, nil
}
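
Per-node ("map") priority functions such as ImageLocalityPriorityMap keep their map signature but now return a framework.NodeScore keyed by Name instead of a schedulerapi.HostPriority keyed by Host. A hypothetical minimal map function showing only the new shape (the function name and constant score are invented for illustration):

    package priorities

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
        schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    // constantPriorityMap demonstrates the post-refactor return type; it applies
    // no real scoring logic.
    func constantPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
        node := nodeInfo.Node()
        if node == nil {
            return framework.NodeScore{}, fmt.Errorf("node not found")
        }
        return framework.NodeScore{Name: node.Name, Score: 5}, nil
    }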

View File

@@ -18,14 +18,14 @@ package priorities
import (
"crypto/sha256"
"encoding/hex"
"reflect"
"sort"
"testing"
"encoding/hex"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers"
)
@@ -114,7 +114,7 @@ func TestImageLocalityPriority(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -129,7 +129,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 10 * (250M/2 - 23M)/(1000M - 23M) = 1
pod: &v1.Pod{Spec: test40250},
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}},
name: "two images spread on two nodes, prefer the larger image one",
},
{
@@ -144,7 +144,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: test40300},
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
name: "two images on one node, prefer this node",
},
{
@@ -159,7 +159,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0 (10M/2 < 23M, min-threshold)
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "if exceed limit, use limit",
},
{
@@ -178,7 +178,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010), makeImageNode("machine3", nodeWithNoImages)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "if exceed limit, use limit (with node which has no images present)",
},
}
@@ -191,8 +191,8 @@ func TestImageLocalityPriority(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
sort.Sort(test.expectedList)
sort.Sort(list)
sortNodeScoreList(test.expectedList)
sortNodeScoreList(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -96,7 +97,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm
// that node; the node(s) with the highest sum are the most preferred.
// Symmetry needs to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity;
// symmetry also needs to be considered for hard requirements from podAffinity.
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
affinity := pod.Spec.Affinity
hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil
@@ -219,14 +220,14 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
}
// calculate final priority score for each node
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
result := make(framework.NodeScoreList, 0, len(nodes))
maxMinDiff := maxCount - minCount
for i, node := range nodes {
fScore := float64(0)
if maxMinDiff > 0 {
fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
result = append(result, framework.NodeScore{Name: node.Name, Score: int64(fScore)})
if klog.V(10) {
klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
}
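
Worked example of the normalization above: with schedulerapi.MaxPriority = 10 and per-node counts of 2, 5, and 8 (so minCount = 2, maxCount = 8, maxMinDiff = 6), fScore comes out as 10*(0/6) = 0, 10*(3/6) = 5, and 10*(6/6) = 10 respectively, which int64(fScore) then stores unchanged in the NodeScore entries.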

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -266,7 +267,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -276,7 +277,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "all machines are same priority as Affinity is nil",
},
// the node (machine1) that has the label {"region": "China"} (matching the topology key) and that has existing pods matching the labelSelector gets a high score
@@ -294,7 +295,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
"which doesn't match either pods in nodes or in topology key",
},
@@ -312,7 +313,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
name: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
},
// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
@@ -336,7 +337,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 5}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 5}},
name: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
},
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
@@ -352,7 +353,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
name: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
},
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
@@ -368,7 +369,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
{
@@ -382,7 +383,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
@@ -402,7 +403,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
},
{
@@ -415,7 +416,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
},
{
@@ -429,7 +430,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
},
// Test the symmetry cases for anti affinity
@@ -443,7 +444,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
},
// Test both affinity and anti-affinity
@@ -457,7 +458,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
},
// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
@@ -482,7 +483,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 4}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 4}},
name: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
},
// Consider Affinity, Anti Affinity and symmetry together.
@@ -504,7 +505,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: 0}},
name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
},
// Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
@@ -520,7 +521,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "Avoid panic when partial nodes in a topology don't have pods with affinity",
},
}
@@ -578,7 +579,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
pods []*v1.Pod
nodes []*v1.Node
hardPodAffinityWeight int32
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -593,7 +594,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
name: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
},
{
@@ -608,7 +609,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: 0,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
},
}

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -93,7 +94,7 @@ func TestLeastRequested(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -110,7 +111,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "nothing scheduled, nothing requested",
},
{
@@ -127,7 +128,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 5}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
@@ -144,7 +145,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -167,7 +168,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: 5}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -190,7 +191,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 4}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -211,7 +212,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 6}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -232,7 +233,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 2}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -242,7 +243,7 @@ func TestLeastRequested(t *testing.T) {
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -108,7 +108,7 @@ func TestMostRequested(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -125,7 +125,7 @@ func TestMostRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
@@ -142,7 +142,7 @@ func TestMostRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 5}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
@@ -159,7 +159,7 @@ func TestMostRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 4}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -182,7 +182,7 @@ func TestMostRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@@ -203,7 +203,7 @@ func TestMostRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: bigCPUAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 2}},
name: "resources requested with more than the node, pods scheduled with resources",
},
}

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -31,10 +32,10 @@ import (
// it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies, and the higher the weights of the satisfied terms, the higher the
// score the node gets.
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
// default is the podspec.
@@ -59,7 +60,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
// TODO: Avoid computing it for all nodes if this becomes a performance problem.
nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
if err != nil {
return schedulerapi.HostPriority{}, err
return framework.NodeScore{}, err
}
if nodeSelector.Matches(labels.Set(node.Labels)) {
count += preferredSchedulingTerm.Weight
@@ -67,8 +68,8 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
}
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(count),
}, nil
}

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -104,7 +105,7 @@ func TestNodeAffinityPriority(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -118,7 +119,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "all machines are same priority as NodeAffinity is nil",
},
{
@@ -132,7 +133,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
},
{
@@ -146,7 +147,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "only machine1 matches the preferred scheduling requirements of pod",
},
{
@@ -160,7 +161,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine5", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 3}},
name: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
},
}

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -43,10 +44,10 @@ func NewNodeLabelPriority(label string, presence bool) (PriorityMapFunction, Pri
// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
exists := labels.Set(node.Labels).Has(n.label)
@@ -54,8 +55,8 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
if (exists && n.presence) || (!exists && !n.presence) {
score = schedulerapi.MaxPriority
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(score),
}, nil
}

View File

@@ -18,12 +18,12 @@ package priorities
import (
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -35,7 +35,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
nodes []*v1.Node
label string
presence bool
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -44,7 +44,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
label: "baz",
presence: true,
name: "no match found, presence true",
@@ -55,7 +55,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
label: "baz",
presence: false,
name: "no match found, presence false",
@@ -66,7 +66,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
label: "foo",
presence: true,
name: "one match found, presence true",
@@ -77,7 +77,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
label: "foo",
presence: false,
name: "one match found, presence false",
@@ -88,7 +88,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
label: "bar",
presence: true,
name: "two matches found, presence true",
@@ -99,7 +99,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
label: "bar",
presence: false,
name: "two matches found, presence false",
@@ -118,8 +118,8 @@ func TestNewNodeLabelPriority(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
sortNodeScoreList(test.expectedList)
sortNodeScoreList(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}

View File

@@ -23,15 +23,16 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// CalculateNodePreferAvoidPodsPriorityMap prioritizes nodes according to the node annotation
// "scheduler.alpha.kubernetes.io/preferAvoidPods".
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
var controllerRef *metav1.OwnerReference
if priorityMeta, ok := meta.(*priorityMetadata); ok {
@@ -49,19 +50,19 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
}
}
if controllerRef == nil {
return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
}
avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations)
if err != nil {
// If we cannot get annotation, assume it's schedulable there.
return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
}
for i := range avoids.PreferAvoidPods {
avoid := &avoids.PreferAvoidPods[i]
if avoid.PodSignature.PodController.Kind == controllerRef.Kind && avoid.PodSignature.PodController.UID == controllerRef.UID {
return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil
return framework.NodeScore{Name: node.Name, Score: 0}, nil
}
}
return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
}

View File

@@ -18,12 +18,12 @@ package priorities
import (
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -83,7 +83,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -96,7 +96,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
name: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
},
{
@@ -109,7 +109,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
name: "ownership by random controller should be ignored",
},
{
@@ -122,7 +122,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
name: "owner without Controller field set should be ignored",
},
{
@@ -135,7 +135,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
name: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
},
}
@@ -148,8 +148,8 @@ func TestNodePreferAvoidPriority(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
sortNodeScoreList(test.expectedList)
sortNodeScoreList(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}

View File

@@ -18,7 +18,7 @@ package priorities
import (
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -30,7 +30,7 @@ func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction {
_ *v1.Pod,
_ interface{},
_ map[string]*schedulernodeinfo.NodeInfo,
result schedulerapi.HostPriorityList) error {
result framework.NodeScoreList) error {
var maxCount int64
for i := range result {

View File

@@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -150,7 +150,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
test string
requested resources
nodes map[string]nodeResources
expectedPriorities schedulerapi.HostPriorityList
expectedPriorities framework.NodeScoreList
}
tests := []test{
@@ -167,7 +167,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
used: resources{0, 0},
},
},
expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 10}, {Host: "node2", Score: 10}},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 10}},
},
{
test: "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
@@ -182,7 +182,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
used: resources{0, 0},
},
},
expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 4}, {Host: "node2", Score: 5}},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}},
},
{
test: "no resources requested, pods scheduled with resources (default - least requested nodes have priority)",
@@ -197,7 +197,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
used: resources{3000, 5000},
},
},
expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 4}, {Host: "node2", Score: 5}},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}},
},
}
@@ -291,7 +291,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -310,7 +310,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
@@ -330,7 +330,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
@@ -353,7 +353,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
@@ -376,7 +376,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 10}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
@@ -447,7 +447,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
@@ -480,7 +480,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
@@ -514,7 +514,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 3}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
@@ -550,7 +550,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 7}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
@@ -601,7 +601,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
pod: &v1.Pod{Spec: extnededResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},

View File

@@ -25,7 +25,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -50,13 +50,13 @@ var DefaultRequestedRatioResources = ResourceToWeightMap{v1.ResourceMemory: 1, v
func (r *ResourceAllocationPriority) PriorityMap(
pod *v1.Pod,
meta interface{},
nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
if r.resourceToWeightMap == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("resources not found")
return framework.NodeScore{}, fmt.Errorf("resources not found")
}
requested := make(ResourceToValueMap, len(r.resourceToWeightMap))
allocatable := make(ResourceToValueMap, len(r.resourceToWeightMap))
@@ -90,8 +90,8 @@ func (r *ResourceAllocationPriority) PriorityMap(
}
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: score,
}, nil
}
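With the return type switched to framework.NodeScore, the number PriorityMap reports still comes from whichever scorer backs the ResourceAllocationPriority. As a rough, self-contained illustration (not the scheduler's implementation), a least-requested style scorer over the DefaultRequestedRatioResources weights shown above could look like the sketch below; the map keys and the example figures are made up for the sketch.

package main

import "fmt"

// leastRequestedScore sketches one scorer that can sit behind a
// ResourceAllocationPriority: the unused fraction of each resource, scaled to
// 0..maxPriority and combined by weight (cpu and memory both weighted 1, as in
// DefaultRequestedRatioResources above).
func leastRequestedScore(requested, allocatable, weights map[string]int64, maxPriority int64) int64 {
    var sum, weightSum int64
    for name, weight := range weights {
        alloc := allocatable[name]
        if alloc == 0 {
            continue // a node that does not advertise the resource contributes nothing
        }
        resourceScore := (alloc - requested[name]) * maxPriority / alloc
        sum += resourceScore * weight
        weightSum += weight
    }
    if weightSum == 0 {
        return 0
    }
    return sum / weightSum
}

func main() {
    weights := map[string]int64{"cpu": 1, "memory": 1}
    requested := map[string]int64{"cpu": 3000, "memory": 5000}
    allocatable := map[string]int64{"cpu": 4000, "memory": 10000}
    fmt.Println(leastRequestedScore(requested, allocatable, weights, 10)) // (2 + 5) / 2 = 3
}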

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/klog"
@@ -33,10 +33,10 @@ import (
// of the pod are satisfied, the node is assigned a score of 1.
// The rationale for choosing the lowest non-zero score of 1 is that it is mainly used to break ties between nodes that
// have the same scores assigned by either the least-requested or the most-requested priority function.
func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
allocatableResources := nodeInfo.AllocatableResource()
@@ -71,8 +71,8 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
)
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: score,
}, nil
}
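To make the comment above concrete: the only scores this map function ever produces are 0 and 1. Below is a hedged, stand-alone sketch of that tie-breaking rule, using plain int64 arguments instead of the real NodeInfo and pod types; the exact aggregation of the cpu and memory checks should be read from the source, the version here follows the "one or both limits satisfied" wording of the comment.

package main

import "fmt"

// nodeScore mirrors the Name/Score shape of framework.NodeScore.
type nodeScore struct {
    Name  string
    Score int64
}

// resourceLimitsScore sketches the tie-breaker described above: a node gets 1
// if it can cover the pod's cpu or memory limit (when one is set), else 0.
func resourceLimitsScore(nodeName string, allocCPU, allocMem, limitCPU, limitMem int64) nodeScore {
    fits := func(limit, allocatable int64) bool {
        return limit != 0 && allocatable >= limit
    }
    score := int64(0)
    if fits(limitCPU, allocCPU) || fits(limitMem, allocMem) {
        score = 1
    }
    return nodeScore{Name: nodeName, Score: score}
}

func main() {
    fmt.Println(resourceLimitsScore("machine1", 3000, 10000, 2500, 0)) // {machine1 1}
    fmt.Println(resourceLimitsScore("machine2", 2000, 10000, 2500, 0)) // {machine2 0}
    fmt.Println(resourceLimitsScore("machine3", 0, 0, 2500, 5000))     // {machine3 0}, no advertised allocatables
}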

View File

@@ -22,8 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -102,37 +101,37 @@ func TestResourceLimitsPriority(t *testing.T) {
// input pod
pod *v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 0), makeNode("machine3", 0, 10000), makeNode("machine4", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}, {Host: "machine4", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}, {Name: "machine4", Score: 0}},
name: "pod does not specify its resource limits",
},
{
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 3000, 10000), makeNode("machine2", 2000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
name: "pod only specifies cpu limits",
},
{
pod: &v1.Pod{Spec: memOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}},
name: "pod only specifies mem limits",
},
{
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 1}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 1}},
name: "pod specifies both cpu and mem limits",
},
{
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
name: "node does not advertise its allocatables",
},
}

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -63,11 +64,11 @@ func NewSelectorSpreadPriority(
// It favors nodes that have fewer existing matching pods.
// i.e. it pushes the scheduler towards a node where there's the smallest number of
// pods which match the same service, RC, RS, or StatefulSet selectors as the pod being scheduled.
func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
var selectors []labels.Selector
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
priorityMeta, ok := meta.(*priorityMetadata)
@@ -78,16 +79,16 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
}
if len(selectors) == 0 {
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: 0,
}, nil
}
count := countMatchingPods(pod.Namespace, selectors, nodeInfo)
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(count),
}, nil
}
@@ -96,7 +97,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
// based on the number of existing matching pods on the node
// where zone information is included on the nodes, it favors nodes
// in zones with fewer existing matching pods.
func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
countsByZone := make(map[string]int64, 10)
maxCountByZone := int64(0)
maxCountByNodeName := int64(0)
@@ -105,7 +106,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
if result[i].Score > maxCountByNodeName {
maxCountByNodeName = result[i].Score
}
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Name].Node())
if zoneID == "" {
continue
}
@@ -132,7 +133,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
}
// If there is zone information present, incorporate it
if haveZones {
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Name].Node())
if zoneID != "" {
zoneScore := MaxPriorityFloat64
if maxCountByZone > 0 {
@@ -144,7 +145,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
result[i].Score = int64(fScore)
if klog.V(10) {
klog.Infof(
"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int64(fScore),
"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Name, int64(fScore),
)
}
}
@@ -210,12 +211,12 @@ func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *
// CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
// on a given machine.
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
var firstServiceSelector labels.Selector
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
priorityMeta, ok := meta.(*priorityMetadata)
if ok {
@@ -230,15 +231,15 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
}
score := countMatchingPods(pod.Namespace, selectors, nodeInfo)
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(score),
}, nil
}
// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
var numServicePods int64
var label string
podCounts := map[string]int64{}
@@ -247,20 +248,20 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
for _, hostPriority := range result {
numServicePods += hostPriority.Score
if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) {
if !labels.Set(nodeNameToInfo[hostPriority.Name].Node().Labels).Has(s.label) {
continue
}
label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label)
labelNodesStatus[hostPriority.Host] = label
label = labels.Set(nodeNameToInfo[hostPriority.Name].Node().Labels).Get(s.label)
labelNodesStatus[hostPriority.Name] = label
podCounts[label] += hostPriority.Score
}
//score int - scale of 0-maxPriority
// 0 being the lowest priority and maxPriority being the highest
for i, hostPriority := range result {
label, ok := labelNodesStatus[hostPriority.Host]
label, ok := labelNodesStatus[hostPriority.Name]
if !ok {
result[i].Host = hostPriority.Host
result[i].Name = hostPriority.Name
result[i].Score = 0
continue
}
@@ -269,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
if numServicePods > 0 {
fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
}
result[i].Host = hostPriority.Host
result[i].Name = hostPriority.Name
result[i].Score = int64(fScore)
}
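The reduce pass above turns raw per-node match counts into final scores and, when nodes carry zone labels, blends a per-zone score into the per-node one. A self-contained sketch of that blend follows; the 2/3 zone weighting is an assumption made for the sketch (it does reproduce the zone-spread expectations in the test hunks later in this diff), and maxPriorityFloat64 is taken to be 10.

package main

import "fmt"

const maxPriorityFloat64 = 10.0

// zoneWeighting is an assumed constant for this sketch: how strongly the
// zone-level spread dominates the node-level spread.
const zoneWeighting = 2.0 / 3.0

// blendSpreadScore sketches CalculateSpreadPriorityReduce's normalization:
// fewer matching pods means a higher score, and the node score is blended
// with a zone score when zone information exists.
func blendSpreadScore(countOnNode, maxCountByNode, countInZone, maxCountByZone int64, haveZone bool) int64 {
    fScore := maxPriorityFloat64
    if maxCountByNode > 0 {
        fScore = maxPriorityFloat64 * float64(maxCountByNode-countOnNode) / float64(maxCountByNode)
    }
    if haveZone && maxCountByZone > 0 {
        zoneScore := maxPriorityFloat64 * float64(maxCountByZone-countInZone) / float64(maxCountByZone)
        fScore = (1.0-zoneWeighting)*fScore + zoneWeighting*zoneScore
    }
    return int64(fScore)
}

func main() {
    // "five pods, 3 matching (z2=2, z3=1)": max per-node count is 1, max per-zone count is 2.
    fmt.Println(blendSpreadScore(0, 1, 1, 2, true)) // 6  -> "Pod in zone"
    fmt.Println(blendSpreadScore(1, 1, 1, 2, true)) // 3  -> "Pod on node"
    fmt.Println(blendSpreadScore(0, 1, 0, 2, true)) // 10 -> no matching pods on the node or in its zone
}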

View File

@@ -18,13 +18,13 @@ package priorities
import (
"reflect"
"sort"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -61,20 +61,20 @@ func TestSelectorSpreadPriority(t *testing.T) {
rss []*apps.ReplicaSet
services []*v1.Service
sss []*apps.StatefulSet
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
pod: new(v1.Pod),
nodes: []string{"machine1", "machine2"},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "nothing scheduled",
},
{
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{{Spec: zone1Spec}},
nodes: []string{"machine1", "machine2"},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "no services",
},
{
@@ -82,7 +82,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
name: "different services",
},
{
@@ -93,7 +93,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "two pods, one service pod",
},
{
@@ -107,7 +107,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "five pods, one service pod in no namespace",
},
{
@@ -120,7 +120,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "four pods, one service pod in default namespace",
},
{
@@ -134,7 +134,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
name: "five pods, one service pod in specific namespace",
},
{
@@ -146,7 +146,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "three pods, two service pods on different machines",
},
{
@@ -159,7 +159,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 0}},
name: "four pods, three service pods",
},
{
@@ -171,7 +171,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
name: "service with partial pod label matches",
},
{
@@ -186,7 +186,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
// do spreading pod2 and pod3 and not pod1.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "service with partial pod label matches with service and replication controller",
},
{
@@ -200,7 +200,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "service with partial pod label matches with service and replica set",
},
{
@@ -213,7 +213,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "service with partial pod label matches with service and stateful set",
},
{
@@ -227,7 +227,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
// Taken together Service and Replication Controller should match no pods.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
name: "disjoined service and replication controller matches no pods",
},
{
@@ -241,7 +241,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
name: "disjoined service and replica set matches no pods",
},
{
@@ -254,7 +254,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
name: "disjoined service and stateful set matches no pods",
},
{
@@ -267,7 +267,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Both Nodes have one pod from the given RC, hence both get 0 score.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "Replication controller with partial pod label matches",
},
{
@@ -280,7 +280,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "Replica set with partial pod label matches",
},
{
@@ -293,7 +293,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "StatefulSet with partial pod label matches",
},
{
@@ -305,7 +305,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
name: "Another replication controller with partial pod label matches",
},
{
@@ -318,7 +318,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
name: "Another replication set with partial pod label matches",
},
{
@@ -331,7 +331,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
nodes: []string{"machine1", "machine2"},
sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
name: "Another stateful set with partial pod label matches",
},
}
@@ -411,31 +411,31 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
rss []*apps.ReplicaSet
services []*v1.Service
sss []*apps.StatefulSet
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
pod: new(v1.Pod),
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
},
name: "nothing scheduled",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
},
name: "no services",
},
@@ -443,13 +443,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
},
name: "different services",
},
@@ -460,13 +460,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine1Zone2, labels2, nil),
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
},
name: "two pods, 0 matching",
},
@@ -477,13 +477,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine1Zone2, labels1, nil),
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
{Host: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
{Name: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
},
name: "two pods, 1 matching (in z2)",
},
@@ -497,13 +497,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine2Zone3, labels1, nil),
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 0}, // Pod on node
{Host: nodeMachine1Zone3, Score: 6}, // Pod in zone
{Host: nodeMachine2Zone3, Score: 3}, // Pod on node
{Host: nodeMachine3Zone3, Score: 6}, // Pod in zone
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 0}, // Pod on node
{Name: nodeMachine1Zone3, Score: 6}, // Pod in zone
{Name: nodeMachine2Zone3, Score: 3}, // Pod on node
{Name: nodeMachine3Zone3, Score: 6}, // Pod in zone
},
name: "five pods, 3 matching (z2=2, z3=1)",
},
@@ -516,13 +516,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine1Zone3, labels1, nil),
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: 0}, // Pod on node
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Pod on node
{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
@@ -535,13 +535,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine2Zone2, labels2, nil),
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: 0}, // Pod on node
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Pod on node
{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
@@ -553,7 +553,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
},
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
expectedList: []framework.NodeScore{
// Note that because we put two pods on the same node (nodeMachine1Zone3),
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
// However they kind of make sense; zone1 is still most-highly favored.
@@ -561,12 +561,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
// We would probably prefer to see a bigger gap between putting a second
// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
// This is also consistent with what we have already.
{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
{Host: nodeMachine1Zone2, Score: 5}, // Pod on node
{Host: nodeMachine2Zone2, Score: 6}, // Pod in zone
{Host: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
{Name: nodeMachine1Zone2, Score: 5}, // Pod on node
{Name: nodeMachine2Zone2, Score: 6}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
name: "Replication controller spreading (z1=0, z2=1, z3=2)",
},
@@ -594,8 +594,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
sortNodeScoreList(test.expectedList)
sortNodeScoreList(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
@@ -640,24 +640,24 @@ func TestZoneSpreadPriority(t *testing.T) {
pods []*v1.Pod
nodes map[string]map[string]string
services []*v1.Service
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
{
pod: new(v1.Pod),
nodes: labeledNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "nothing scheduled",
},
{
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{{Spec: zone1Spec}},
nodes: labeledNodes,
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "no services",
},
{
@@ -665,9 +665,9 @@ func TestZoneSpreadPriority(t *testing.T) {
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "different services",
},
{
@@ -679,9 +679,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: 0}, {Name: "machine22", Score: 0},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "three pods, one service pod",
},
{
@@ -693,9 +693,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 5}, {Name: "machine12", Score: 5},
{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "three pods, two service pods on different machines",
},
{
@@ -708,9 +708,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 0}, {Name: "machine12", Score: 0},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "three service label match pods in different namespaces",
},
{
@@ -723,9 +723,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 6}, {Name: "machine12", Score: 6},
{Name: "machine21", Score: 3}, {Name: "machine22", Score: 3},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "four pods, three service pods",
},
{
@@ -737,9 +737,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 3}, {Name: "machine12", Score: 3},
{Name: "machine21", Score: 6}, {Name: "machine22", Score: 6},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "service with partial pod label matches",
},
{
@@ -752,9 +752,9 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 7}, {Name: "machine12", Score: 7},
{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "service pod on non-zoned node",
},
}
@@ -782,8 +782,8 @@ func TestZoneSpreadPriority(t *testing.T) {
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
sortNodeScoreList(test.expectedList)
sortNodeScoreList(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -52,10 +53,10 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi
}
// ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node
func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
return framework.NodeScore{}, fmt.Errorf("node not found")
}
// To hold all the tolerations with Effect PreferNoSchedule
var tolerationsPreferNoSchedule []v1.Toleration
@@ -66,8 +67,8 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
tolerationsPreferNoSchedule = getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
}
return schedulerapi.HostPriority{
Host: node.Name,
return framework.NodeScore{
Name: node.Name,
Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
}, nil
}
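The map pass above therefore produces a raw count: how many PreferNoSchedule taints on the node the pod does not tolerate. Below is a deliberately simplified, self-contained sketch of that counting, using local stand-in types and an exact key/value/effect match; the real toleration matching lives in k8s.io/kubernetes/pkg/apis/core/v1/helper and also handles operators and wildcard keys. Presumably a reverse-normalizing reduce step, like the NormalizeReduce shown earlier in this diff, then turns a high count into a low final score.

package main

import "fmt"

// Minimal local stand-ins for v1.Taint and v1.Toleration, just enough for the sketch.
type taint struct{ Key, Value, Effect string }
type toleration struct{ Key, Value, Effect string }

// tolerates uses a simplified exact match; the real helper is more permissive.
func tolerates(tol toleration, t taint) bool {
    return tol.Key == t.Key && tol.Value == t.Value && (tol.Effect == "" || tol.Effect == t.Effect)
}

// countIntolerable mirrors the idea of countIntolerableTaintsPreferNoSchedule:
// only PreferNoSchedule taints are considered, and each one the pod cannot
// tolerate adds 1 to the node's raw (bad) score.
func countIntolerable(taints []taint, tolerations []toleration) int64 {
    var count int64
    for _, t := range taints {
        if t.Effect != "PreferNoSchedule" {
            continue
        }
        tolerated := false
        for _, tol := range tolerations {
            if tolerates(tol, t) {
                tolerated = true
                break
            }
        }
        if !tolerated {
            count++
        }
    }
    return count
}

func main() {
    nodeTaints := []taint{
        {Key: "cpu-type", Value: "arm64", Effect: "PreferNoSchedule"},
        {Key: "disk-type", Value: "ssd", Effect: "PreferNoSchedule"},
    }
    podTolerations := []toleration{{Key: "cpu-type", Value: "arm64", Effect: "PreferNoSchedule"}}
    fmt.Println(countIntolerable(nodeTaints, podTolerations)) // 1
}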

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -53,7 +54,7 @@ func TestTaintAndToleration(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
expectedList framework.NodeScoreList
name string
}{
// basic test case
@@ -77,9 +78,9 @@ func TestTaintAndToleration(t *testing.T) {
Effect: v1.TaintEffectPreferNoSchedule,
}}),
},
expectedList: []schedulerapi.HostPriority{
{Host: "nodeA", Score: schedulerapi.MaxPriority},
{Host: "nodeB", Score: 0},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: 0},
},
},
// the count of taints that are tolerated by pod, does not matter.
@@ -119,10 +120,10 @@ func TestTaintAndToleration(t *testing.T) {
},
}),
},
expectedList: []schedulerapi.HostPriority{
{Host: "nodeA", Score: schedulerapi.MaxPriority},
{Host: "nodeB", Score: schedulerapi.MaxPriority},
{Host: "nodeC", Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: schedulerapi.MaxPriority},
{Name: "nodeC", Score: schedulerapi.MaxPriority},
},
},
// the count of taints on a node that are not tolerated by pod, matters.
@@ -155,10 +156,10 @@ func TestTaintAndToleration(t *testing.T) {
},
}),
},
expectedList: []schedulerapi.HostPriority{
{Host: "nodeA", Score: schedulerapi.MaxPriority},
{Host: "nodeB", Score: 5},
{Host: "nodeC", Score: 0},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: 5},
{Name: "nodeC", Score: 0},
},
},
// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
@@ -198,10 +199,10 @@ func TestTaintAndToleration(t *testing.T) {
},
}),
},
expectedList: []schedulerapi.HostPriority{
{Host: "nodeA", Score: schedulerapi.MaxPriority},
{Host: "nodeB", Score: schedulerapi.MaxPriority},
{Host: "nodeC", Score: 0},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: schedulerapi.MaxPriority},
{Name: "nodeC", Score: 0},
},
},
{
@@ -219,9 +220,9 @@ func TestTaintAndToleration(t *testing.T) {
},
}),
},
expectedList: []schedulerapi.HostPriority{
{Host: "nodeA", Score: schedulerapi.MaxPriority},
{Host: "nodeB", Score: 0},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: 0},
},
},
}

View File

@@ -17,10 +17,12 @@ limitations under the License.
package priorities
import (
"sort"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -57,8 +59,8 @@ func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedR
}
func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}) PriorityFunction {
return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
result := make(framework.NodeScoreList, 0, len(nodes))
for i := range nodes {
hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name])
if err != nil {
@@ -74,3 +76,12 @@ func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction
return result, nil
}
}
func sortNodeScoreList(out framework.NodeScoreList) {
sort.Slice(out, func(i, j int) bool {
if out[i].Score == out[j].Score {
return out[i].Name < out[j].Name
}
return out[i].Score < out[j].Score
})
}
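The new sortNodeScoreList helper replaces the earlier sort.Sort calls, which presumably relied on the old HostPriorityList satisfying sort.Interface. A small stand-alone usage sketch (with local mirrors of the framework types) shows why the sort-then-compare pattern in the tests keeps working:

package main

import (
    "fmt"
    "reflect"
    "sort"
)

// Local mirrors of framework.NodeScore / NodeScoreList as used in this diff.
type nodeScore struct {
    Name  string
    Score int64
}
type nodeScoreList []nodeScore

// sortNodeScoreList, as added in the hunk above: order by Score, then by Name,
// so two lists compare equal with reflect.DeepEqual regardless of input order.
func sortNodeScoreList(out nodeScoreList) {
    sort.Slice(out, func(i, j int) bool {
        if out[i].Score == out[j].Score {
            return out[i].Name < out[j].Name
        }
        return out[i].Score < out[j].Score
    })
}

func main() {
    expected := nodeScoreList{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}}
    got := nodeScoreList{{"machine3", 0}, {"machine2", 10}, {"machine1", 10}}
    sortNodeScoreList(expected)
    sortNodeScoreList(got)
    fmt.Println(reflect.DeepEqual(expected, got)) // true
}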

View File

@@ -18,20 +18,20 @@ package priorities
import (
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error)
type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error)
// PriorityReduceFunction is a function that aggregates per-node results and computes
// final scores for all nodes.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error
// PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used only for priority functions. For predicates, please use PredicateMetadataProducer.
@@ -40,7 +40,7 @@ type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*sched
// PriorityFunction is a function that computes scores for all nodes.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error)
// PriorityConfig is a config used for a priority function.
type PriorityConfig struct {

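Given the map and reduce signatures above, a legacy-style PriorityFunction is just the two composed over every node, which is exactly what the test helper earlier in this diff does. Below is a compact, self-contained sketch of that composition, using local mirror types instead of the real *v1.Pod and NodeInfo parameters.

package main

import "fmt"

// Local mirrors of framework.NodeScore / NodeScoreList from this diff.
type nodeScore struct {
    Name  string
    Score int64
}
type nodeScoreList []nodeScore

type mapFunc func(node string) (nodeScore, error)
type reduceFunc func(result nodeScoreList) error

// compose builds a whole-node-set scorer out of a per-node map function and an
// optional reduce, mirroring the shape of the deprecated PriorityFunction.
func compose(mapFn mapFunc, reduceFn reduceFunc) func(nodes []string) (nodeScoreList, error) {
    return func(nodes []string) (nodeScoreList, error) {
        result := make(nodeScoreList, 0, len(nodes))
        for _, n := range nodes {
            s, err := mapFn(n)
            if err != nil {
                return nil, err
            }
            result = append(result, s)
        }
        if reduceFn != nil {
            if err := reduceFn(result); err != nil {
                return nil, err
            }
        }
        return result, nil
    }
}

func main() {
    constant := func(node string) (nodeScore, error) { return nodeScore{Name: node, Score: 1}, nil }
    score := compose(constant, nil)
    fmt.Println(score([]string{"machine1", "machine2"})) // [{machine1 1} {machine2 1}] <nil>
}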
View File

@@ -18,7 +18,7 @@ package algorithm
import (
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -34,12 +34,12 @@ type SchedulerExtender interface {
// the list of failed nodes and failure reasons.
Filter(pod *v1.Pod,
nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)
) (filteredNodes []*v1.Node, failedNodesMap extenderv1.FailedNodesMap, err error)
// Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by the Kubernetes scheduler. The total scores are used for host selection.
Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error)
Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
// Bind delegates the action of binding a pod to a node to the extender.
Bind(binding *v1.Binding) error
@@ -61,9 +61,9 @@ type SchedulerExtender interface {
// 2. A different set of victim pods for every given candidate node after the preemption phase of the extender.
ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims,
nodeToVictims map[*v1.Node]*extenderv1.Victims,
nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error)
) (map[*v1.Node]*extenderv1.Victims, error)
// SupportsPreemption returns whether the scheduler extender supports preemption.
SupportsPreemption() bool
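For orientation on what these extenderv1 types flow through: a scheduler extender is an HTTP service, and its Prioritize handler returns one {host, score} entry per node, which the scheduler then multiplies by the extender's configured weight before adding to its own scores, as the interface comment above describes. Below is a hedged sketch of such a handler; the request and response structs are an illustrative subset written for this example (field names follow the HostPriority shape used by the extender API, but the authoritative schema is the one being moved into pkg/scheduler/apis/extender/v1).

package main

import (
    "encoding/json"
    "log"
    "net/http"
)

// extenderArgs is an illustrative subset of the arguments the scheduler posts
// to an extender; only the node names are used here.
type extenderArgs struct {
    NodeNames *[]string `json:"nodenames,omitempty"`
}

// hostPriority follows the Host/Score shape of the extender HostPriority type.
type hostPriority struct {
    Host  string `json:"host"`
    Score int64  `json:"score"`
}

// prioritize answers the scheduler's Prioritize call with one score per node.
func prioritize(w http.ResponseWriter, r *http.Request) {
    var args extenderArgs
    if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    result := []hostPriority{}
    if args.NodeNames != nil {
        for _, name := range *args.NodeNames {
            // Toy policy: every node gets the same score; a real extender would
            // rank nodes using whatever external signal it owns.
            result = append(result, hostPriority{Host: name, Score: 5})
        }
    }
    _ = json.NewEncoder(w).Encode(result)
}

func main() {
    http.HandleFunc("/prioritize", prioritize)
    log.Fatal(http.ListenAndServe(":8888", nil))
}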