feat(scheduler): expand node score range to [0, 100]
This commit is contained in:
		| @@ -223,12 +223,12 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores (remaining resources) on 0-10 scale | 				Node1 scores (remaining resources) on 0-100 scale | ||||||
| 				CPU Fraction: 0 / 4000 = 0% | 				CPU Fraction: 0 / 4000 = 0% | ||||||
| 				Memory Fraction: 0 / 10000 = 0% | 				Memory Fraction: 0 / 10000 = 0% | ||||||
| 				Node1 Score: 10 - (0-0)*10 = 10 | 				Node1 Score: 100 - (0-0)*100 = 100 | ||||||
|  |  | ||||||
| 				Node2 scores (remaining resources) on 0-10 scale | 				Node2 scores (remaining resources) on 0-100 scale | ||||||
| 				CPU Fraction: 0 / 4000 = 0 % | 				CPU Fraction: 0 / 4000 = 0 % | ||||||
| 				Memory Fraction: 0 / 10000 = 0% | 				Memory Fraction: 0 / 10000 = 0% | ||||||
| 				Node2 Score: 10 - (0-0)*10 = 10 | 				Node2 Score: 100 - (0-0)*100 = 100 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | ||||||
| @@ -240,16 +240,16 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Fraction: 3000 / 4000= 75% | 				CPU Fraction: 3000 / 4000= 75% | ||||||
| 				Memory Fraction: 5000 / 10000 = 50% | 				Memory Fraction: 5000 / 10000 = 50% | ||||||
| 				Node1 Score: 10 - (0.75-0.5)*10 = 7 | 				Node1 Score: 100 - (0.75-0.5)*100 = 75 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Fraction: 3000 / 6000= 50% | 				CPU Fraction: 3000 / 6000= 50% | ||||||
| 				Memory Fraction: 5000/10000 = 50% | 				Memory Fraction: 5000/10000 = 50% | ||||||
| 				Node2 Score: 10 - (0.5-0.5)*10 = 10 | 				Node2 Score: 100 - (0.5-0.5)*100 = 100 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: framework.MaxNodeScore}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 75}, {Name: "machine2", Score: framework.MaxNodeScore}}, | ||||||
| 			name:         "nothing scheduled, resources requested, differently sized machines", | 			name:         "nothing scheduled, resources requested, differently sized machines", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -257,12 +257,12 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Fraction: 0 / 4000= 0% | 				CPU Fraction: 0 / 4000= 0% | ||||||
| 				Memory Fraction: 0 / 10000 = 0% | 				Memory Fraction: 0 / 10000 = 0% | ||||||
| 				Node1 Score: 10 - (0-0)*10 = 10 | 				Node1 Score: 100 - (0-0)*100 = 100 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Fraction: 0 / 4000= 0% | 				CPU Fraction: 0 / 4000= 0% | ||||||
| 				Memory Fraction: 0 / 10000 = 0% | 				Memory Fraction: 0 / 10000 = 0% | ||||||
| 				Node2 Score: 10 - (0-0)*10 = 10 | 				Node2 Score: 100 - (0-0)*100 = 100 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | ||||||
| @@ -280,16 +280,16 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 0 / 20000 = 0% | 				Memory Fraction: 0 / 20000 = 0% | ||||||
| 				Node1 Score: 10 - (0.6-0)*10 = 4 | 				Node1 Score: 100 - (0.6-0)*100 = 40 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 5000 / 20000 = 25% | 				Memory Fraction: 5000 / 20000 = 25% | ||||||
| 				Node2 Score: 10 - (0.6-0.25)*10 = 6 | 				Node2 Score: 100 - (0.6-0.25)*100 = 65 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 6}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 65}}, | ||||||
| 			name:         "no resources requested, pods scheduled with resources", | 			name:         "no resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | ||||||
| @@ -303,16 +303,16 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 5000 / 20000 = 25% | 				Memory Fraction: 5000 / 20000 = 25% | ||||||
| 				Node1 Score: 10 - (0.6-0.25)*10 = 6 | 				Node1 Score: 100 - (0.6-0.25)*100 = 65 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 10000 / 20000 = 50% | 				Memory Fraction: 10000 / 20000 = 50% | ||||||
| 				Node2 Score: 10 - (0.6-0.5)*10 = 9 | 				Node2 Score: 100 - (0.6-0.5)*100 = 90 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 9}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 90}}, | ||||||
| 			name:         "resources requested, pods scheduled with resources", | 			name:         "resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
| @@ -324,16 +324,16 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 5000 / 20000 = 25% | 				Memory Fraction: 5000 / 20000 = 25% | ||||||
| 				Node1 Score: 10 - (0.6-0.25)*10 = 6 | 				Node1 Score: 100 - (0.6-0.25)*100 = 65 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Fraction: 6000 / 10000 = 60% | 				CPU Fraction: 6000 / 10000 = 60% | ||||||
| 				Memory Fraction: 10000 / 50000 = 20% | 				Memory Fraction: 10000 / 50000 = 20% | ||||||
| 				Node2 Score: 10 - (0.6-0.2)*10 = 6 | 				Node2 Score: 100 - (0.6-0.2)*100 = 60 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 6}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 60}}, | ||||||
| 			name:         "resources requested, pods scheduled with resources, differently sized machines", | 			name:         "resources requested, pods scheduled with resources, differently sized machines", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
| @@ -388,7 +388,7 @@ func TestBalancedResourceAllocation(t *testing.T) { | |||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine3", Score: 8}, {Name: "machine4", Score: 9}}, | 			expectedList: []framework.NodeScore{{Name: "machine3", Score: 89}, {Name: "machine4", Score: 98}}, | ||||||
| 			name:         "Include volume count on a node for balanced resource allocation", | 			name:         "Include volume count on a node for balanced resource allocation", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuAndMemory3}, | 				{Spec: cpuAndMemory3}, | ||||||
|   | |||||||
| @@ -20,7 +20,7 @@ import ( | |||||||
| 	"reflect" | 	"reflect" | ||||||
| 	"testing" | 	"testing" | ||||||
|  |  | ||||||
| 	"k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
| 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | ||||||
| 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" | 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" | ||||||
| 	st "k8s.io/kubernetes/pkg/scheduler/testing" | 	st "k8s.io/kubernetes/pkg/scheduler/testing" | ||||||
| @@ -121,8 +121,8 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 10}, | 				{Name: "node-a", Score: 100}, | ||||||
| 				{Name: "node-b", Score: 10}, | 				{Name: "node-b", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -143,7 +143,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 10}, | 				{Name: "node-a", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -160,14 +160,14 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 10}, | 				{Name: "node-a", Score: 100}, | ||||||
| 				{Name: "node-b", Score: 10}, | 				{Name: "node-b", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			// matching pods spread as 2/1/0/3, total = 6 | 			// matching pods spread as 2/1/0/3, total = 6 | ||||||
| 			// after reversing, it's 4/5/6/3 | 			// after reversing, it's 4/5/6/3 | ||||||
| 			// so scores = 40/6, 50/6, 60/6, 30/6 | 			// so scores = 400/6, 500/6, 600/6, 300/6 | ||||||
| 			name: "one constraint on node, all 4 nodes are candidates", | 			name: "one constraint on node, all 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", ""). | 			pod: st.MakePod().Name("p").Label("foo", ""). | ||||||
| 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -188,16 +188,16 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			failedNodes: []*v1.Node{}, | 			failedNodes: []*v1.Node{}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 6}, | 				{Name: "node-a", Score: 66}, | ||||||
| 				{Name: "node-b", Score: 8}, | 				{Name: "node-b", Score: 83}, | ||||||
| 				{Name: "node-c", Score: 10}, | 				{Name: "node-c", Score: 100}, | ||||||
| 				{Name: "node-d", Score: 5}, | 				{Name: "node-d", Score: 50}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			// matching pods spread as 4/2/1/~3~, total = 4+2+1 = 7 (as node4 is not a candidate) | 			// matching pods spread as 4/2/1/~3~, total = 4+2+1 = 7 (as node4 is not a candidate) | ||||||
| 			// after reversing, it's 3/5/6 | 			// after reversing, it's 3/5/6 | ||||||
| 			// so scores = 30/6, 50/6, 60/6 | 			// so scores = 300/6, 500/6, 600/6 | ||||||
| 			name: "one constraint on node, 3 out of 4 nodes are candidates", | 			name: "one constraint on node, 3 out of 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", ""). | 			pod: st.MakePod().Name("p").Label("foo", ""). | ||||||
| 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -223,15 +223,15 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(), | 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 5}, | 				{Name: "node-a", Score: 50}, | ||||||
| 				{Name: "node-b", Score: 8}, | 				{Name: "node-b", Score: 83}, | ||||||
| 				{Name: "node-x", Score: 10}, | 				{Name: "node-x", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			// matching pods spread as 4/?2?/1/~3~, total = 4+?+1 = 5 (as node2 is problematic) | 			// matching pods spread as 4/?2?/1/~3~, total = 4+?+1 = 5 (as node2 is problematic) | ||||||
| 			// after reversing, it's 1/?/4 | 			// after reversing, it's 1/?/4 | ||||||
| 			// so scores = 10/4, 0, 40/4 | 			// so scores = 100/4, 0, 400/4 | ||||||
| 			name: "one constraint on node, 3 out of 4 nodes are candidates", | 			name: "one constraint on node, 3 out of 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", ""). | 			pod: st.MakePod().Name("p").Label("foo", ""). | ||||||
| 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -257,15 +257,15 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(), | 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 2}, | 				{Name: "node-a", Score: 25}, | ||||||
| 				{Name: "node-b", Score: 0}, | 				{Name: "node-b", Score: 0}, | ||||||
| 				{Name: "node-x", Score: 10}, | 				{Name: "node-x", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			// matching pods spread as 4/2/1/~3~, total = 6+6+4 = 16 (as topologyKey is zone instead of node) | 			// matching pods spread as 4/2/1/~3~, total = 6+6+4 = 16 (as topologyKey is zone instead of node) | ||||||
| 			// after reversing, it's 10/10/12 | 			// after reversing, it's 10/10/12 | ||||||
| 			// so scores = 100/12, 100/12, 120/12 | 			// so scores = 1000/12, 1000/12, 1200/12 | ||||||
| 			name: "one constraint on zone, 3 out of 4 nodes are candidates", | 			name: "one constraint on zone, 3 out of 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", ""). | 			pod: st.MakePod().Name("p").Label("foo", ""). | ||||||
| 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -291,15 +291,15 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 8}, | 				{Name: "node-a", Score: 83}, | ||||||
| 				{Name: "node-b", Score: 8}, | 				{Name: "node-b", Score: 83}, | ||||||
| 				{Name: "node-x", Score: 10}, | 				{Name: "node-x", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			// matching pods spread as 2/~1~/2/~4~, total = 2+3 + 2+6 = 13 (zone and node should be both summed up) | 			// matching pods spread as 2/~1~/2/~4~, total = 2+3 + 2+6 = 13 (zone and node should be both summed up) | ||||||
| 			// after reversing, it's 8/5 | 			// after reversing, it's 8/5 | ||||||
| 			// so scores = 80/8, 50/8 | 			// so scores = 800/8, 500/8 | ||||||
| 			name: "two constraints on zone and node, 2 out of 4 nodes are candidates", | 			name: "two constraints on zone and node, 2 out of 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", ""). | 			pod: st.MakePod().Name("p").Label("foo", ""). | ||||||
| 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -325,8 +325,8 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 10}, | 				{Name: "node-a", Score: 100}, | ||||||
| 				{Name: "node-x", Score: 6}, | 				{Name: "node-x", Score: 62}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -342,7 +342,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			// For the second constraint (node): the matching pods spread as 0/1/0/1 | 			// For the second constraint (node): the matching pods spread as 0/1/0/1 | ||||||
| 			// sum them up gets: 2/3/1/2, and total number is 8. | 			// sum them up gets: 2/3/1/2, and total number is 8. | ||||||
| 			// after reversing, it's 6/5/7/6 | 			// after reversing, it's 6/5/7/6 | ||||||
| 			// so scores = 60/7, 50/7, 70/7, 60/7 | 			// so scores = 600/7, 500/7, 700/7, 600/7 | ||||||
| 			name: "two constraints on zone and node, with different labelSelectors", | 			name: "two constraints on zone and node, with different labelSelectors", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | ||||||
| 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -362,10 +362,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			failedNodes: []*v1.Node{}, | 			failedNodes: []*v1.Node{}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 8}, | 				{Name: "node-a", Score: 85}, | ||||||
| 				{Name: "node-b", Score: 7}, | 				{Name: "node-b", Score: 71}, | ||||||
| 				{Name: "node-x", Score: 10}, | 				{Name: "node-x", Score: 100}, | ||||||
| 				{Name: "node-y", Score: 8}, | 				{Name: "node-y", Score: 85}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -373,7 +373,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			// For the second constraint (node): the matching pods spread as 0/1/0/1 | 			// For the second constraint (node): the matching pods spread as 0/1/0/1 | ||||||
| 			// sum them up gets: 0/1/2/3, and total number is 6. | 			// sum them up gets: 0/1/2/3, and total number is 6. | ||||||
| 			// after reversing, it's 6/5/4/3. | 			// after reversing, it's 6/5/4/3. | ||||||
| 			// so scores = 60/6, 50/6, 40/6, 30/6 | 			// so scores = 600/6, 500/6, 400/6, 300/6 | ||||||
| 			name: "two constraints on zone and node, with different labelSelectors, some nodes have 0 pods", | 			name: "two constraints on zone and node, with different labelSelectors, some nodes have 0 pods", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | ||||||
| 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -392,10 +392,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			failedNodes: []*v1.Node{}, | 			failedNodes: []*v1.Node{}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 10}, | 				{Name: "node-a", Score: 100}, | ||||||
| 				{Name: "node-b", Score: 8}, | 				{Name: "node-b", Score: 83}, | ||||||
| 				{Name: "node-x", Score: 6}, | 				{Name: "node-x", Score: 66}, | ||||||
| 				{Name: "node-y", Score: 5}, | 				{Name: "node-y", Score: 50}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -403,7 +403,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 			// For the second constraint (node): the matching pods spread as 0/1/0/~1~ | 			// For the second constraint (node): the matching pods spread as 0/1/0/~1~ | ||||||
| 			// sum them up gets: 2/3/1, and total number is 6. | 			// sum them up gets: 2/3/1, and total number is 6. | ||||||
| 			// after reversing, it's 4/3/5 | 			// after reversing, it's 4/3/5 | ||||||
| 			// so scores = 40/5, 30/5, 50/5 | 			// so scores = 400/5, 300/5, 500/5 | ||||||
| 			name: "two constraints on zone and node, with different labelSelectors, 3 out of 4 nodes are candidates", | 			name: "two constraints on zone and node, with different labelSelectors, 3 out of 4 nodes are candidates", | ||||||
| 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). | ||||||
| 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | 				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). | ||||||
| @@ -424,9 +424,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) { | |||||||
| 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(), | ||||||
| 			}, | 			}, | ||||||
| 			want: []framework.NodeScore{ | 			want: []framework.NodeScore{ | ||||||
| 				{Name: "node-a", Score: 8}, | 				{Name: "node-a", Score: 80}, | ||||||
| 				{Name: "node-b", Score: 6}, | 				{Name: "node-b", Score: 60}, | ||||||
| 				{Name: "node-x", Score: 10}, | 				{Name: "node-x", Score: 100}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|   | |||||||
| @@ -125,10 +125,10 @@ func TestImageLocalityPriority(t *testing.T) { | |||||||
|  |  | ||||||
| 			// Node2 | 			// Node2 | ||||||
| 			// Image: gcr.io/250:latest 250MB | 			// Image: gcr.io/250:latest 250MB | ||||||
| 			// Score: 10 * (250M/2 - 23M)/(1000M - 23M) = 1 | 			// Score: 100 * (250M/2 - 23M)/(1000M - 23M) = 10 | ||||||
| 			pod:          &v1.Pod{Spec: test40250}, | 			pod:          &v1.Pod{Spec: test40250}, | ||||||
| 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)}, | 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 10}}, | ||||||
| 			name:         "two images spread on two nodes, prefer the larger image one", | 			name:         "two images spread on two nodes, prefer the larger image one", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -136,14 +136,14 @@ func TestImageLocalityPriority(t *testing.T) { | |||||||
|  |  | ||||||
| 			// Node1 | 			// Node1 | ||||||
| 			// Image: gcr.io/40:latest 40MB, gcr.io/300:latest 300MB | 			// Image: gcr.io/40:latest 40MB, gcr.io/300:latest 300MB | ||||||
| 			// Score: 10 * ((40M + 300M)/2 - 23M)/(1000M - 23M) = 1 | 			// Score: 100 * ((40M + 300M)/2 - 23M)/(1000M - 23M) = 15 | ||||||
|  |  | ||||||
| 			// Node2 | 			// Node2 | ||||||
| 			// Image: not present | 			// Image: not present | ||||||
| 			// Score: 0 | 			// Score: 0 | ||||||
| 			pod:          &v1.Pod{Spec: test40300}, | 			pod:          &v1.Pod{Spec: test40300}, | ||||||
| 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)}, | 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 15}, {Name: "machine2", Score: 0}}, | ||||||
| 			name:         "two images on one node, prefer this node", | 			name:         "two images on one node, prefer this node", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -151,7 +151,7 @@ func TestImageLocalityPriority(t *testing.T) { | |||||||
|  |  | ||||||
| 			// Node1 | 			// Node1 | ||||||
| 			// Image: gcr.io/2000:latest 2000MB | 			// Image: gcr.io/2000:latest 2000MB | ||||||
| 			// Score: 10 (2000M/2 >= 1000M, max-threshold) | 			// Score: 100 (2000M/2 >= 1000M, max-threshold) | ||||||
|  |  | ||||||
| 			// Node2 | 			// Node2 | ||||||
| 			// Image: gcr.io/10:latest 10MB | 			// Image: gcr.io/10:latest 10MB | ||||||
| @@ -166,7 +166,7 @@ func TestImageLocalityPriority(t *testing.T) { | |||||||
|  |  | ||||||
| 			// Node1 | 			// Node1 | ||||||
| 			// Image: gcr.io/2000:latest 2000MB | 			// Image: gcr.io/2000:latest 2000MB | ||||||
| 			// Score: 10 * (2000M/3 - 23M)/(1000M - 23M) = 6 | 			// Score: 100 * (2000M/3 - 23M)/(1000M - 23M) = 65 | ||||||
|  |  | ||||||
| 			// Node2 | 			// Node2 | ||||||
| 			// Image: gcr.io/10:latest 10MB | 			// Image: gcr.io/10:latest 10MB | ||||||
| @@ -177,7 +177,7 @@ func TestImageLocalityPriority(t *testing.T) { | |||||||
| 			// Score: 0 | 			// Score: 0 | ||||||
| 			pod:          &v1.Pod{Spec: testMinMax}, | 			pod:          &v1.Pod{Spec: testMinMax}, | ||||||
| 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010), makeImageNode("machine3", nodeWithNoImages)}, | 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010), makeImageNode("machine3", nodeWithNoImages)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}}, | ||||||
| 			name:         "if exceed limit, use limit (with node which has no images present)", | 			name:         "if exceed limit, use limit (with node which has no images present)", | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|   | |||||||
| @@ -336,7 +336,7 @@ func TestInterPodAffinityPriority(t *testing.T) { | |||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, | ||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 5}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 50}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 50}}, | ||||||
| 			name:         "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", | 			name:         "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", | ||||||
| 		}, | 		}, | ||||||
| 		// Test with the different operators and values for pod affinity scheduling preference, including some match failures. | 		// Test with the different operators and values for pod affinity scheduling preference, including some match failures. | ||||||
| @@ -352,7 +352,7 @@ func TestInterPodAffinityPriority(t *testing.T) { | |||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, | ||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 20}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}}, | ||||||
| 			name:         "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", | 			name:         "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", | ||||||
| 		}, | 		}, | ||||||
| 		// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, | 		// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, | ||||||
| @@ -482,7 +482,7 @@ func TestInterPodAffinityPriority(t *testing.T) { | |||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, | ||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 4}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 4}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 40}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 40}}, | ||||||
| 			name:         "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", | 			name:         "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", | ||||||
| 		}, | 		}, | ||||||
| 		// Consider Affinity, Anti Affinity and symmetry together. | 		// Consider Affinity, Anti Affinity and symmetry together. | ||||||
|   | |||||||
| @@ -99,14 +99,14 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores (remaining resources) on 0-10 scale | 				Node1 scores (remaining resources) on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 0) *10) / 4000 = 10 | 				CPU Score: ((4000 - 0) *100) / 4000 = 100 | ||||||
| 				Memory Score: ((10000 - 0) *10) / 10000 = 10 | 				Memory Score: ((10000 - 0) *100) / 10000 = 100 | ||||||
| 				Node1 Score: (10 + 10) / 2 = 10 | 				Node1 Score: (100 + 100) / 2 = 100 | ||||||
|  |  | ||||||
| 				Node2 scores (remaining resources) on 0-10 scale | 				Node2 scores (remaining resources) on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 0) *10) / 4000 = 10 | 				CPU Score: ((4000 - 0) *100) / 4000 = 100 | ||||||
| 				Memory Score: ((10000 - 0) *10) / 10000 = 10 | 				Memory Score: ((10000 - 0) *100) / 10000 = 100 | ||||||
| 				Node2 Score: (10 + 10) / 2 = 10 | 				Node2 Score: (100 + 100) / 2 = 100 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | ||||||
| @@ -116,31 +116,31 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 3000) *10) / 4000 = 2.5 | 				CPU Score: ((4000 - 3000) *100) / 4000 = 25 | ||||||
| 				Memory Score: ((10000 - 5000) *10) / 10000 = 5 | 				Memory Score: ((10000 - 5000) *100) / 10000 = 50 | ||||||
| 				Node1 Score: (2.5 + 5) / 2 = 3 | 				Node1 Score: (25 + 50) / 2 = 37 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((6000 - 3000) *10) / 6000 = 5 | 				CPU Score: ((6000 - 3000) *100) / 6000 = 50 | ||||||
| 				Memory Score: ((10000 - 5000) *10) / 10000 = 5 | 				Memory Score: ((10000 - 5000) *100) / 10000 = 50 | ||||||
| 				Node2 Score: (5 + 5) / 2 = 5 | 				Node2 Score: (50 + 50) / 2 = 50 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 37}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "nothing scheduled, resources requested, differently sized machines", | 			name:         "nothing scheduled, resources requested, differently sized machines", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 0) *10) / 4000 = 10 | 				CPU Score: ((4000 - 0) *100) / 4000 = 100 | ||||||
| 				Memory Score: ((10000 - 0) *10) / 10000 = 10 | 				Memory Score: ((10000 - 0) *100) / 10000 = 100 | ||||||
| 				Node1 Score: (10 + 10) / 2 = 10 | 				Node1 Score: (100 + 100) / 2 = 100 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 0) *10) / 4000 = 10 | 				CPU Score: ((4000 - 0) *100) / 4000 = 100 | ||||||
| 				Memory Score: ((10000 - 0) *10) / 10000 = 10 | 				Memory Score: ((10000 - 0) *100) / 10000 = 100 | ||||||
| 				Node2 Score: (10 + 10) / 2 = 10 | 				Node2 Score: (100 + 100) / 2 = 100 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | ||||||
| @@ -156,18 +156,18 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((20000 - 0) *10) / 20000 = 10 | 				Memory Score: ((20000 - 0) *100) / 20000 = 100 | ||||||
| 				Node1 Score: (4 + 10) / 2 = 7 | 				Node1 Score: (40 + 100) / 2 = 70 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5 | 				Memory Score: ((20000 - 5000) *100) / 20000 = 75 | ||||||
| 				Node2 Score: (4 + 7.5) / 2 = 5 | 				Node2 Score: (40 + 75) / 2 = 57 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 57}}, | ||||||
| 			name:         "no resources requested, pods scheduled with resources", | 			name:         "no resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | ||||||
| @@ -179,18 +179,18 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5 | 				Memory Score: ((20000 - 5000) *100) / 20000 = 75 | ||||||
| 				Node1 Score: (4 + 7.5) / 2 = 5 | 				Node1 Score: (40 + 75) / 2 = 57 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((20000 - 10000) *10) / 20000 = 5 | 				Memory Score: ((20000 - 10000) *100) / 20000 = 50 | ||||||
| 				Node2 Score: (4 + 5) / 2 = 4 | 				Node2 Score: (40 + 50) / 2 = 45 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 4}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 45}}, | ||||||
| 			name:         "resources requested, pods scheduled with resources", | 			name:         "resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
| @@ -200,18 +200,18 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5 | 				Memory Score: ((20000 - 5000) *100) / 20000 = 75 | ||||||
| 				Node1 Score: (4 + 7.5) / 2 = 5 | 				Node1 Score: (40 + 75) / 2 = 57 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((10000 - 6000) *10) / 10000 = 4 | 				CPU Score: ((10000 - 6000) *100) / 10000 = 40 | ||||||
| 				Memory Score: ((50000 - 10000) *10) / 50000 = 8 | 				Memory Score: ((50000 - 10000) *100) / 50000 = 80 | ||||||
| 				Node2 Score: (4 + 8) / 2 = 6 | 				Node2 Score: (40 + 80) / 2 = 60 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 6}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 60}}, | ||||||
| 			name:         "resources requested, pods scheduled with resources, differently sized machines", | 			name:         "resources requested, pods scheduled with resources, differently sized machines", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
| @@ -221,18 +221,18 @@ func TestLeastRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 6000) *10) / 4000 = 0 | 				CPU Score: ((4000 - 6000) *100) / 4000 = 0 | ||||||
| 				Memory Score: ((10000 - 0) *10) / 10000 = 10 | 				Memory Score: ((10000 - 0) *100) / 10000 = 100 | ||||||
| 				Node1 Score: (0 + 10) / 2 = 5 | 				Node1 Score: (0 + 100) / 2 = 50 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: ((4000 - 6000) *10) / 4000 = 0 | 				CPU Score: ((4000 - 6000) *100) / 4000 = 0 | ||||||
| 				Memory Score: ((10000 - 5000) *10) / 10000 = 5 | 				Memory Score: ((10000 - 5000) *100) / 10000 = 50 | ||||||
| 				Node2 Score: (0 + 5) / 2 = 2 | 				Node2 Score: (0 + 50) / 2 = 25 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuOnly}, | 			pod:          &v1.Pod{Spec: cpuOnly}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 2}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 25}}, | ||||||
| 			name:         "requested resources exceed node capacity", | 			name:         "requested resources exceed node capacity", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
|   | |||||||
| @@ -20,7 +20,7 @@ import ( | |||||||
| 	"reflect" | 	"reflect" | ||||||
| 	"testing" | 	"testing" | ||||||
|  |  | ||||||
| 	"k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
| 	"k8s.io/apimachinery/pkg/api/resource" | 	"k8s.io/apimachinery/pkg/api/resource" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | ||||||
| @@ -114,13 +114,13 @@ func TestMostRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores (used resources) on 0-10 scale | 				Node1 scores (used resources) on 0-100 scale | ||||||
| 				CPU Score: (0 * 10  / 4000 = 0 | 				CPU Score: (0 * 100)  / 4000 = 0 | ||||||
| 				Memory Score: (0 * 10) / 10000 = 0 | 				Memory Score: (0 * 100) / 10000 = 0 | ||||||
| 				Node1 Score: (0 + 0) / 2 = 0 | 				Node1 Score: (0 + 0) / 2 = 0 | ||||||
|  |  | ||||||
| 				Node2 scores (used resources) on 0-10 scale | 				Node2 scores (used resources) on 0-100 scale | ||||||
| 				CPU Score: (0 * 10 / 4000 = 0 | 				CPU Score: (0 * 100) / 4000 = 0 | ||||||
| 				Memory Score: (0 * 10 / 10000 = 0 | 				Memory Score: (0 * 100) / 10000 = 0 | ||||||
| 				Node2 Score: (0 + 0) / 2 = 0 | 				Node2 Score: (0 + 0) / 2 = 0 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| @@ -131,35 +131,35 @@ func TestMostRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: (3000 * 10 / 4000 = 7.5 | 				CPU Score: (3000 * 100) / 4000 = 75 | ||||||
| 				Memory Score: (5000 * 10) / 10000 = 5 | 				Memory Score: (5000 * 100) / 10000 = 50 | ||||||
| 				Node1 Score: (7.5 + 5) / 2 = 6 | 				Node1 Score: (75 + 50) / 2 = 62 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: (3000 * 10 / 6000 = 5 | 				CPU Score: (3000 * 100) / 6000 = 50 | ||||||
| 				Memory Score: (5000 * 10 / 10000 = 5 | 				Memory Score: (5000 * 100) / 10000 = 50 | ||||||
| 				Node2 Score: (5 + 5) / 2 = 5 | 				Node2 Score: (50 + 50) / 2 = 50 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 62}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "nothing scheduled, resources requested, differently sized machines", | 			name:         "nothing scheduled, resources requested, differently sized machines", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: (6000 * 10) / 10000 = 6 | 				CPU Score: (6000 * 100) / 10000 = 60 | ||||||
| 				Memory Score: (0 * 10) / 20000 = 10 | 				Memory Score: (0 * 100) / 20000 = 0 | ||||||
| 				Node1 Score: (6 + 0) / 2 = 3 | 				Node1 Score: (60 + 0) / 2 = 30 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: (6000 * 10) / 10000 = 6 | 				CPU Score: (6000 * 100) / 10000 = 60 | ||||||
| 				Memory Score: (5000 * 10) / 20000 = 2.5 | 				Memory Score: (5000 * 100) / 20000 = 25 | ||||||
| 				Node2 Score: (6 + 2.5) / 2 = 4 | 				Node2 Score: (60 + 25) / 2 = 42 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: noResources}, | 			pod:          &v1.Pod{Spec: noResources}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 4}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 30}, {Name: "machine2", Score: 42}}, | ||||||
| 			name:         "no resources requested, pods scheduled with resources", | 			name:         "no resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, | ||||||
| @@ -171,18 +171,18 @@ func TestMostRequested(t *testing.T) { | |||||||
| 		{ | 		{ | ||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: (6000 * 10) / 10000 = 6 | 				CPU Score: (6000 * 100) / 10000 = 60 | ||||||
| 				Memory Score: (5000 * 10) / 20000 = 2.5 | 				Memory Score: (5000 * 100) / 20000 = 25 | ||||||
| 				Node1 Score: (6 + 2.5) / 2 = 4 | 				Node1 Score: (60 + 25) / 2 = 42 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: (6000 * 10) / 10000 = 6 | 				CPU Score: (6000 * 100) / 10000 = 60 | ||||||
| 				Memory Score: (10000 * 10) / 20000 = 5 | 				Memory Score: (10000 * 100) / 20000 = 50 | ||||||
| 				Node2 Score: (6 + 5) / 2 = 5 | 				Node2 Score: (60 + 50) / 2 = 55 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: cpuAndMemory}, | 			pod:          &v1.Pod{Spec: cpuAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 42}, {Name: "machine2", Score: 55}}, | ||||||
| 			name:         "resources requested, pods scheduled with resources", | 			name:         "resources requested, pods scheduled with resources", | ||||||
| 			pods: []*v1.Pod{ | 			pods: []*v1.Pod{ | ||||||
| 				{Spec: cpuOnly}, | 				{Spec: cpuOnly}, | ||||||
| @@ -193,17 +193,17 @@ func TestMostRequested(t *testing.T) { | |||||||
| 			/* | 			/* | ||||||
| 				Node1 scores on 0-10 scale | 				Node1 scores on 0-100 scale | ||||||
| 				CPU Score: 5000 > 4000 return 0 | 				CPU Score: 5000 > 4000 return 0 | ||||||
| 				Memory Score: (9000 * 10) / 10000 = 9 | 				Memory Score: (9000 * 100) / 10000 = 90 | ||||||
| 				Node1 Score: (0 + 9) / 2 = 4 | 				Node1 Score: (0 + 90) / 2 = 45 | ||||||
|  |  | ||||||
| 				Node2 scores on 0-10 scale | 				Node2 scores on 0-100 scale | ||||||
| 				CPU Score: (5000 * 10) / 10000 = 5 | 				CPU Score: (5000 * 100) / 10000 = 50 | ||||||
| 				Memory Score: 9000 > 8000 return 0 | 				Memory Score: 9000 > 8000 return 0 | ||||||
| 				Node2 Score: (5 + 0) / 2 = 2 | 				Node2 Score: (50 + 0) / 2 = 25 | ||||||
| 			*/ | 			*/ | ||||||
| 			pod:          &v1.Pod{Spec: bigCPUAndMemory}, | 			pod:          &v1.Pod{Spec: bigCPUAndMemory}, | ||||||
| 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)}, | 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 2}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 45}, {Name: "machine2", Score: 25}}, | ||||||
| 			name:         "resources requested with more than the node, pods scheduled with resources", | 			name:         "resources requested with more than the node, pods scheduled with resources", | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|   | |||||||
| @@ -160,7 +160,7 @@ func TestNodeAffinityPriority(t *testing.T) { | |||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}}, | ||||||
| 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, | 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine5", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 3}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 18}, {Name: "machine5", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 36}}, | ||||||
| 			name:         "all machines matches the preferred scheduling requirements of pod but with different priorities ", | 			name:         "all machines matches the preferred scheduling requirements of pod but with different priorities ", | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|   | |||||||
| @@ -38,7 +38,16 @@ type FunctionShapePoint struct { | |||||||
|  |  | ||||||
| var ( | var ( | ||||||
| 	// give priority to least utilized nodes by default | 	// give priority to least utilized nodes by default | ||||||
| 	defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{{0, 10}, {100, 0}}) | 	defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{ | ||||||
|  | 		{ | ||||||
|  | 			Utilization: 0, | ||||||
|  | 			Score:       framework.MaxNodeScore, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			Utilization: 100, | ||||||
|  | 			Score:       framework.MinNodeScore, | ||||||
|  | 		}, | ||||||
|  | 	}) | ||||||
| ) | ) | ||||||
|  |  | ||||||
| const ( | const ( | ||||||
|   | |||||||
| @@ -56,17 +56,17 @@ func TestCreatingFunctionShapeErrorsIfXIsNotSorted(t *testing.T) { | |||||||
|  |  | ||||||
| func TestCreatingFunctionPointNotInAllowedRange(t *testing.T) { | func TestCreatingFunctionPointNotInAllowedRange(t *testing.T) { | ||||||
| 	var err error | 	var err error | ||||||
| 	_, err = NewFunctionShape([]FunctionShapePoint{{-1, 0}, {100, 10}}) | 	_, err = NewFunctionShape([]FunctionShapePoint{{-1, 0}, {100, 100}}) | ||||||
| 	assert.Equal(t, "utilization values must not be less than 0. Utilization[0]==-1", err.Error()) | 	assert.Equal(t, "utilization values must not be less than 0. Utilization[0]==-1", err.Error()) | ||||||
|  |  | ||||||
| 	_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {101, 10}}) | 	_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {101, 100}}) | ||||||
| 	assert.Equal(t, "utilization values must not be greater than 100. Utilization[1]==101", err.Error()) | 	assert.Equal(t, "utilization values must not be greater than 100. Utilization[1]==101", err.Error()) | ||||||
|  |  | ||||||
| 	_, err = NewFunctionShape([]FunctionShapePoint{{0, -1}, {100, 10}}) | 	_, err = NewFunctionShape([]FunctionShapePoint{{0, -1}, {100, 100}}) | ||||||
| 	assert.Equal(t, "score values must not be less than 0. Score[0]==-1", err.Error()) | 	assert.Equal(t, "score values must not be less than 0. Score[0]==-1", err.Error()) | ||||||
|  |  | ||||||
| 	_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 11}}) | 	_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 101}}) | ||||||
| 	assert.Equal(t, "score valuses not be greater than 10. Score[1]==11", err.Error()) | 	assert.Equal(t, "score valuses not be greater than 100. Score[1]==101", err.Error()) | ||||||
| } | } | ||||||
|  |  | ||||||
| func TestBrokenLinearFunction(t *testing.T) { | func TestBrokenLinearFunction(t *testing.T) { | ||||||
| @@ -167,7 +167,7 @@ func TestRequestedToCapacityRatio(t *testing.T) { | |||||||
| 					used:     resources{0, 0}, | 					used:     resources{0, 0}, | ||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 10}}, | 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			test:      "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)", | 			test:      "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)", | ||||||
| @@ -182,7 +182,7 @@ func TestRequestedToCapacityRatio(t *testing.T) { | |||||||
| 					used:     resources{0, 0}, | 					used:     resources{0, 0}, | ||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}}, | 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}}, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			test:      "no resources requested, pods scheduled with resources (default - least requested nodes have priority)", | 			test:      "no resources requested, pods scheduled with resources (default - least requested nodes have priority)", | ||||||
| @@ -197,7 +197,7 @@ func TestRequestedToCapacityRatio(t *testing.T) { | |||||||
| 					used:     resources{3000, 5000}, | 					used:     resources{3000, 5000}, | ||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}}, | 			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}}, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|   | |||||||
| @@ -158,7 +158,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:        []string{"machine1", "machine2"}, | 			nodes:        []string{"machine1", "machine2"}, | ||||||
| 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 0}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 0}}, | ||||||
| 			name:         "four pods, three service pods", | 			name:         "four pods, three service pods", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -170,7 +170,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:        []string{"machine1", "machine2"}, | 			nodes:        []string{"machine1", "machine2"}, | ||||||
| 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, | 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "service with partial pod label matches", | 			name:         "service with partial pod label matches", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -226,7 +226,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, | 			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | ||||||
| 			// Taken together Service and Replication Controller should match no pods. | 			// Taken together Service and Replication Controller should match no pods. | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, | ||||||
| 			name:         "disjoined service and replication controller matches no pods", | 			name:         "disjoined service and replication controller matches no pods", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -240,7 +240,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | ||||||
| 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, | 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, | ||||||
| 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. | 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, | ||||||
| 			name:         "disjoined service and replica set matches no pods", | 			name:         "disjoined service and replica set matches no pods", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -253,7 +253,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			nodes:        []string{"machine1", "machine2"}, | 			nodes:        []string{"machine1", "machine2"}, | ||||||
| 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, | ||||||
| 			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, | 			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, | ||||||
| 			name:         "disjoined service and stateful set matches no pods", | 			name:         "disjoined service and stateful set matches no pods", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -304,7 +304,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:        []string{"machine1", "machine2"}, | 			nodes:        []string{"machine1", "machine2"}, | ||||||
| 			rcs:          []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, | 			rcs:          []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "Another replication controller with partial pod label matches", | 			name:         "Another replication controller with partial pod label matches", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -317,7 +317,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			nodes: []string{"machine1", "machine2"}, | 			nodes: []string{"machine1", "machine2"}, | ||||||
| 			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, | 			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, | ||||||
| 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. | 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "Another replication set with partial pod label matches", | 			name:         "Another replication set with partial pod label matches", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| @@ -330,7 +330,7 @@ func TestSelectorSpreadPriority(t *testing.T) { | |||||||
| 			nodes: []string{"machine1", "machine2"}, | 			nodes: []string{"machine1", "machine2"}, | ||||||
| 			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, | 			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, | ||||||
| 			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above. | 			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above. | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}}, | 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, | ||||||
| 			name:         "Another stateful set with partial pod label matches", | 			name:         "Another stateful set with partial pod label matches", | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| @@ -478,8 +478,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { | |||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, | 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, | ||||||
| 				{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on machine | 				{Name: nodeMachine1Zone2, Score: 0},  // Already have pod on machine | ||||||
| 				{Name: nodeMachine2Zone2, Score: 3}, // Already have pod in zone | 				{Name: nodeMachine2Zone2, Score: 33}, // Already have pod in zone | ||||||
| 				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, | 				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, | ||||||
| 				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, | 				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, | ||||||
| 				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, | 				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, | ||||||
| @@ -498,11 +498,11 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { | |||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, | 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, | ||||||
| 				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine2Zone2, Score: 0}, // Pod on node | 				{Name: nodeMachine2Zone2, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine1Zone3, Score: 6}, // Pod in zone | 				{Name: nodeMachine1Zone3, Score: 66}, // Pod in zone | ||||||
| 				{Name: nodeMachine2Zone3, Score: 3}, // Pod on node | 				{Name: nodeMachine2Zone3, Score: 33}, // Pod on node | ||||||
| 				{Name: nodeMachine3Zone3, Score: 6}, // Pod in zone | 				{Name: nodeMachine3Zone3, Score: 66}, // Pod in zone | ||||||
| 			}, | 			}, | ||||||
| 			name: "five pods, 3 matching (z2=2, z3=1)", | 			name: "five pods, 3 matching (z2=2, z3=1)", | ||||||
| 		}, | 		}, | ||||||
| @@ -516,12 +516,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: nodeMachine1Zone1, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone1, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone | 				{Name: nodeMachine2Zone2, Score: 33}, // Pod in zone | ||||||
| 				{Name: nodeMachine1Zone3, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone3, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone | 				{Name: nodeMachine2Zone3, Score: 33}, // Pod in zone | ||||||
| 				{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone | 				{Name: nodeMachine3Zone3, Score: 33}, // Pod in zone | ||||||
| 			}, | 			}, | ||||||
| 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)", | 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)", | ||||||
| 		}, | 		}, | ||||||
| @@ -535,12 +535,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: nodeMachine1Zone1, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone1, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone | 				{Name: nodeMachine2Zone2, Score: 33}, // Pod in zone | ||||||
| 				{Name: nodeMachine1Zone3, Score: 0}, // Pod on node | 				{Name: nodeMachine1Zone3, Score: 0},  // Pod on node | ||||||
| 				{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone | 				{Name: nodeMachine2Zone3, Score: 33}, // Pod in zone | ||||||
| 				{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone | 				{Name: nodeMachine3Zone3, Score: 33}, // Pod in zone | ||||||
| 			}, | 			}, | ||||||
| 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)", | 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)", | ||||||
| 		}, | 		}, | ||||||
| @@ -561,11 +561,11 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { | |||||||
| 				// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct. | 				// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct. | ||||||
| 				// This is also consistent with what we have already. | 				// This is also consistent with what we have already. | ||||||
| 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone | 				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone | ||||||
| 				{Name: nodeMachine1Zone2, Score: 5},                      // Pod on node | 				{Name: nodeMachine1Zone2, Score: 50},                     // Pod on node | ||||||
| 				{Name: nodeMachine2Zone2, Score: 6},                      // Pod in zone | 				{Name: nodeMachine2Zone2, Score: 66},                     // Pod in zone | ||||||
| 				{Name: nodeMachine1Zone3, Score: 0},                      // Two pods on node | 				{Name: nodeMachine1Zone3, Score: 0},                      // Two pods on node | ||||||
| 				{Name: nodeMachine2Zone3, Score: 3},                      // Pod in zone | 				{Name: nodeMachine2Zone3, Score: 33},                     // Pod in zone | ||||||
| 				{Name: nodeMachine3Zone3, Score: 3},                      // Pod in zone | 				{Name: nodeMachine3Zone3, Score: 33},                     // Pod in zone | ||||||
| 			}, | 			}, | ||||||
| 			name: "Replication controller spreading (z1=0, z2=1, z3=2)", | 			name: "Replication controller spreading (z1=0, z2=1, z3=2)", | ||||||
| 		}, | 		}, | ||||||
| @@ -692,8 +692,8 @@ func TestZoneSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:    labeledNodes, | 			nodes:    labeledNodes, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 5}, {Name: "machine12", Score: 5}, | 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 50}, {Name: "machine12", Score: 50}, | ||||||
| 				{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5}, | 				{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50}, | ||||||
| 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | ||||||
| 			name: "three pods, two service pods on different machines", | 			name: "three pods, two service pods on different machines", | ||||||
| 		}, | 		}, | ||||||
| @@ -722,8 +722,8 @@ func TestZoneSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:    labeledNodes, | 			nodes:    labeledNodes, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 6}, {Name: "machine12", Score: 6}, | 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 66}, {Name: "machine12", Score: 66}, | ||||||
| 				{Name: "machine21", Score: 3}, {Name: "machine22", Score: 3}, | 				{Name: "machine21", Score: 33}, {Name: "machine22", Score: 33}, | ||||||
| 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | ||||||
| 			name: "four pods, three service pods", | 			name: "four pods, three service pods", | ||||||
| 		}, | 		}, | ||||||
| @@ -736,8 +736,8 @@ func TestZoneSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:    labeledNodes, | 			nodes:    labeledNodes, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 3}, {Name: "machine12", Score: 3}, | 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 33}, {Name: "machine12", Score: 33}, | ||||||
| 				{Name: "machine21", Score: 6}, {Name: "machine22", Score: 6}, | 				{Name: "machine21", Score: 66}, {Name: "machine22", Score: 66}, | ||||||
| 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | ||||||
| 			name: "service with partial pod label matches", | 			name: "service with partial pod label matches", | ||||||
| 		}, | 		}, | ||||||
| @@ -751,8 +751,8 @@ func TestZoneSpreadPriority(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			nodes:    labeledNodes, | 			nodes:    labeledNodes, | ||||||
| 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, | ||||||
| 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 7}, {Name: "machine12", Score: 7}, | 			expectedList: []framework.NodeScore{{Name: "machine11", Score: 75}, {Name: "machine12", Score: 75}, | ||||||
| 				{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5}, | 				{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50}, | ||||||
| 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | 				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}}, | ||||||
| 			name: "service pod on non-zoned node", | 			name: "service pod on non-zoned node", | ||||||
| 		}, | 		}, | ||||||
|   | |||||||
| @@ -157,7 +157,7 @@ func TestTaintAndToleration(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: "nodeA", Score: framework.MaxNodeScore}, | 				{Name: "nodeA", Score: framework.MaxNodeScore}, | ||||||
| 				{Name: "nodeB", Score: 5}, | 				{Name: "nodeB", Score: 50}, | ||||||
| 				{Name: "nodeC", Score: 0}, | 				{Name: "nodeC", Score: 0}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
|   | |||||||
| @@ -27,6 +27,9 @@ const ( | |||||||
| 	// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes | 	// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes | ||||||
| 	// that once found feasible, the scheduler stops looking for more nodes. | 	// that once found feasible, the scheduler stops looking for more nodes. | ||||||
| 	DefaultPercentageOfNodesToScore = 50 | 	DefaultPercentageOfNodesToScore = 50 | ||||||
|  |  | ||||||
|  | 	// CustomPriorityMaxScore is the max score UtilizationShapePoint expects. | ||||||
|  | 	CustomPriorityMaxScore int64 = 10 | ||||||
| ) | ) | ||||||
|  |  | ||||||
| // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object | ||||||
|   | |||||||
| @@ -106,9 +106,9 @@ func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.Node | |||||||
| func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) { | func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) { | ||||||
| 	result := []framework.NodeScore{} | 	result := []framework.NodeScore{} | ||||||
| 	for _, node := range nodes { | 	for _, node := range nodes { | ||||||
| 		score := 1 | 		score := 10 | ||||||
| 		if node.Name == "machine2" { | 		if node.Name == "machine2" { | ||||||
| 			score = 10 | 			score = 100 | ||||||
| 		} | 		} | ||||||
| 		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)}) | 		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)}) | ||||||
| 	} | 	} | ||||||
|   | |||||||
| @@ -946,7 +946,7 @@ func TestZeroRequest(t *testing.T) { | |||||||
| 				{Spec: large1}, {Spec: noResources1}, | 				{Spec: large1}, {Spec: noResources1}, | ||||||
| 				{Spec: large2}, {Spec: small2}, | 				{Spec: large2}, {Spec: small2}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedScore: 25, | 			expectedScore: 250, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			pod:   &v1.Pod{Spec: small}, | 			pod:   &v1.Pod{Spec: small}, | ||||||
| @@ -956,7 +956,7 @@ func TestZeroRequest(t *testing.T) { | |||||||
| 				{Spec: large1}, {Spec: noResources1}, | 				{Spec: large1}, {Spec: noResources1}, | ||||||
| 				{Spec: large2}, {Spec: small2}, | 				{Spec: large2}, {Spec: small2}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedScore: 25, | 			expectedScore: 250, | ||||||
| 		}, | 		}, | ||||||
| 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule. | 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule. | ||||||
| 		{ | 		{ | ||||||
| @@ -967,7 +967,7 @@ func TestZeroRequest(t *testing.T) { | |||||||
| 				{Spec: large1}, {Spec: noResources1}, | 				{Spec: large1}, {Spec: noResources1}, | ||||||
| 				{Spec: large2}, {Spec: small2}, | 				{Spec: large2}, {Spec: small2}, | ||||||
| 			}, | 			}, | ||||||
| 			expectedScore: 23, | 			expectedScore: 230, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|   | |||||||
| @@ -417,7 +417,13 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s | |||||||
| 	n := len(arguments.UtilizationShape) | 	n := len(arguments.UtilizationShape) | ||||||
| 	points := make([]priorities.FunctionShapePoint, 0, n) | 	points := make([]priorities.FunctionShapePoint, 0, n) | ||||||
| 	for _, point := range arguments.UtilizationShape { | 	for _, point := range arguments.UtilizationShape { | ||||||
| 		points = append(points, priorities.FunctionShapePoint{Utilization: int64(point.Utilization), Score: int64(point.Score)}) | 		points = append(points, priorities.FunctionShapePoint{ | ||||||
|  | 			Utilization: int64(point.Utilization), | ||||||
|  | 			// CustomPriorityMaxScore may diverge from the max score used in the scheduler and defined by MaxNodeScore, | ||||||
|  | 			// therefore we need to scale the score returned by requested to capacity ratio to the score range | ||||||
|  | 			// used by the scheduler. | ||||||
|  | 			Score: int64(point.Score) * (framework.MaxNodeScore / schedulerapi.CustomPriorityMaxScore), | ||||||
|  | 		}) | ||||||
| 	} | 	} | ||||||
| 	shape, err := priorities.NewFunctionShape(points) | 	shape, err := priorities.NewFunctionShape(points) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
|   | |||||||
| @@ -65,9 +65,9 @@ func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArguments(t *testi | |||||||
| 	} | 	} | ||||||
| 	builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments) | 	builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments) | ||||||
| 	expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{ | 	expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{ | ||||||
| 		{Utilization: 10, Score: 1}, | 		{Utilization: 10, Score: 10}, | ||||||
| 		{Utilization: 30, Score: 5}, | 		{Utilization: 30, Score: 50}, | ||||||
| 		{Utilization: 70, Score: 2}, | 		{Utilization: 70, Score: 20}, | ||||||
| 	}) | 	}) | ||||||
| 	expectedResources := priorities.ResourceToWeightMap{ | 	expectedResources := priorities.ResourceToWeightMap{ | ||||||
| 		v1.ResourceCPU:    1, | 		v1.ResourceCPU:    1, | ||||||
| @@ -87,9 +87,9 @@ func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArgumentsNilResour | |||||||
| 	} | 	} | ||||||
| 	builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments) | 	builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments) | ||||||
| 	expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{ | 	expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{ | ||||||
| 		{Utilization: 10, Score: 1}, | 		{Utilization: 10, Score: 10}, | ||||||
| 		{Utilization: 30, Score: 5}, | 		{Utilization: 30, Score: 50}, | ||||||
| 		{Utilization: 70, Score: 2}, | 		{Utilization: 70, Score: 20}, | ||||||
| 	}) | 	}) | ||||||
| 	expectedResources := priorities.ResourceToWeightMap{ | 	expectedResources := priorities.ResourceToWeightMap{ | ||||||
| 		v1.ResourceCPU:    1, | 		v1.ResourceCPU:    1, | ||||||
|   | |||||||
| @@ -20,7 +20,7 @@ import ( | |||||||
| 	"reflect" | 	"reflect" | ||||||
| 	"testing" | 	"testing" | ||||||
|  |  | ||||||
| 	"k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" | 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" | ||||||
| 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | ||||||
| @@ -157,7 +157,7 @@ func TestTaintTolerationScore(t *testing.T) { | |||||||
| 			}, | 			}, | ||||||
| 			expectedList: []framework.NodeScore{ | 			expectedList: []framework.NodeScore{ | ||||||
| 				{Name: "nodeA", Score: framework.MaxNodeScore}, | 				{Name: "nodeA", Score: framework.MaxNodeScore}, | ||||||
| 				{Name: "nodeB", Score: 5}, | 				{Name: "nodeB", Score: 50}, | ||||||
| 				{Name: "nodeC", Score: 0}, | 				{Name: "nodeC", Score: 0}, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
|   | |||||||
| @@ -73,7 +73,7 @@ const ( | |||||||
|  |  | ||||||
| const ( | const ( | ||||||
| 	// MaxNodeScore is the maximum score a Score plugin is expected to return. | 	// MaxNodeScore is the maximum score a Score plugin is expected to return. | ||||||
| 	MaxNodeScore int64 = 10 | 	MaxNodeScore int64 = 100 | ||||||
|  |  | ||||||
| 	// MinNodeScore is the minimum score a Score plugin is expected to return. | 	// MinNodeScore is the minimum score a Score plugin is expected to return. | ||||||
| 	MinNodeScore int64 = 0 | 	MinNodeScore int64 = 0 | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
	 draveness
					draveness