Scheduler extension
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

 // the unused capacity is calculated on a scale of 0-10
@@ -73,7 +74,7 @@ func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {

 // Calculate the resource occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
-func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
+func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
 	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
@@ -104,7 +105,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 		cpuScore, memoryScore,
 	)

-	return algorithm.HostPriority{
+	return schedulerapi.HostPriority{
 		Host:  node.Name,
 		Score: int((cpuScore + memoryScore) / 2),
 	}
@@ -114,14 +115,14 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
 // based on the minimum of the average of the fraction of requested to capacity.
 // Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
-func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
 	nodes, err := nodeLister.List()
 	if err != nil {
-		return algorithm.HostPriorityList{}, err
+		return schedulerapi.HostPriorityList{}, err
 	}
 	podsToMachines, err := predicates.MapPodsToMachines(podLister)

-	list := algorithm.HostPriorityList{}
+	list := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		list = append(list, calculateResourceOccupancy(pod, node, podsToMachines[node.Name]))
 	}
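The formula in the `LeastRequestedPriority` doc comment is easy to mis-read: each resource is scored as `(capacity - sum(requested)) * 10 / capacity`, and the two per-resource scores are then averaged, as `calculateResourceOccupancy` shows. A minimal self-contained sketch of that integer arithmetic (the helper name is invented, and the pod requests of 3000 milli-CPU / 5000 memory are inferred from the expected test scores below):

```go
package main

import "fmt"

// unusedScore applies the per-resource formula from the doc comment above,
// using the same integer arithmetic: (capacity - requested) * 10 / capacity.
func unusedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	// "nothing scheduled, resources requested, differently sized machines":
	// machine1 has 4000m CPU / 10000 memory, machine2 has 6000m CPU / 10000 memory.
	m1 := (unusedScore(3000, 4000) + unusedScore(5000, 10000)) / 2 // (2 + 5) / 2 = 3
	m2 := (unusedScore(3000, 6000) + unusedScore(5000, 10000)) / 2 // (5 + 5) / 2 = 5
	fmt.Println(m1, m2) // 3 5 -- matching expectedList {"machine1", 3}, {"machine2", 5}
}
```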
@@ -144,7 +145,7 @@ func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunctio
 // CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
 // If presence is true, prioritizes nodes that have the specified label, regardless of value.
 // If presence is false, prioritizes nodes that do not have the specified label.
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
 	var score int
 	nodes, err := nodeLister.List()
 	if err != nil {
@@ -157,7 +158,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 		labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
 	}

-	result := []algorithm.HostPriority{}
+	result := []schedulerapi.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for nodeName, success := range labeledNodes {
@@ -166,7 +167,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 		} else {
 			score = 0
 		}
-		result = append(result, algorithm.HostPriority{Host: nodeName, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: nodeName, Score: score})
 	}
 	return result, nil
 }
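The presence logic above is a simple XNOR: a node counts as a success when label existence matches the desired `presence`. The hunk elides the branch that assigns the winning score, but the 0-10 scale comments and the `{"machine1", 10}` test expectations imply it sets 10. A sketch with an invented helper name:

```go
package main

import "fmt"

// labelScore reproduces the presence test from CalculateNodeLabelPriority:
// a node scores 10 when (label exists) == (presence wanted), else 0.
func labelScore(exists, presence bool) int {
	if (exists && presence) || (!exists && !presence) {
		return 10
	}
	return 0
}

func main() {
	fmt.Println(labelScore(true, true))   // node has the label, presence=true  -> 10
	fmt.Println(labelScore(false, true))  // node lacks the label, presence=true -> 0
	fmt.Println(labelScore(true, false))  // node has the label, presence=false -> 0
	fmt.Println(labelScore(false, false)) // node lacks the label, presence=false -> 10
}
```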
@@ -177,21 +178,21 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 // close the two metrics are to each other.
 // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
 // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
-func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
 	nodes, err := nodeLister.List()
 	if err != nil {
-		return algorithm.HostPriorityList{}, err
+		return schedulerapi.HostPriorityList{}, err
 	}
 	podsToMachines, err := predicates.MapPodsToMachines(podLister)

-	list := algorithm.HostPriorityList{}
+	list := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		list = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))
 	}
 	return list, nil
 }

-func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
+func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
 	score := int(0)
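The `BalancedResourceAllocation` formula rewards nodes whose CPU and memory utilization would end up closest to each other. A sketch of the arithmetic (the helper is illustrative, and the cpuAndMemory request of 3000 milli-CPU / 5000 memory is inferred from the expected test scores below):

```go
package main

import (
	"fmt"
	"math"
)

// balancedScore applies the doc comment's formula:
// score = 10 - abs(cpuFraction - memoryFraction) * 10
func balancedScore(cpuFraction, memoryFraction float64) int {
	return int(10 - math.Abs(cpuFraction-memoryFraction)*10)
}

func main() {
	// "machine2" fixture below: 6000m CPU / 10000 memory capacity, so
	// cpuFraction = 3000/6000 = 0.5 and memoryFraction = 5000/10000 = 0.5,
	// a perfectly balanced node.
	fmt.Println(balancedScore(0.5, 0.5)) // 10 -- matching expectedList {"machine2", 10}
	// "machine1" (4000m CPU): cpuFraction = 0.75 -> 10 - 0.25*10 = 7.5, truncated to 7.
	fmt.Println(balancedScore(0.75, 0.5)) // 7 -- matching expectedList {"machine1", 7}
}
```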
@@ -234,7 +235,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 		score,
 	)

-	return algorithm.HostPriority{
+	return schedulerapi.HostPriority{
 		Host:  node.Name,
 		Score: score,
 	}

@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

 func makeNode(node string, milliCPU, memory int64) api.Node {
@@ -133,7 +134,7 @@ func TestZeroRequest(t *testing.T) {
 			// plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
 			// to test what's actually in production.
 			[]algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{})), Weight: 1}},
-			algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+			algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), []algorithm.SchedulerExtender{})
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -217,7 +218,7 @@ func TestLeastRequested(t *testing.T) {
 		pod          *api.Pod
 		pods         []*api.Pod
 		nodes        []api.Node
-		expectedList algorithm.HostPriorityList
+		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
@@ -234,7 +235,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled, nothing requested",
 		},
 		{
@@ -251,7 +252,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 5}},
 			test:         "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
@@ -268,7 +269,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no resources requested, pods scheduled",
 			pods: []*api.Pod{
 				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -291,7 +292,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
 			test:         "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -314,7 +315,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
 			test:         "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -335,7 +336,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
 			test:         "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -356,7 +357,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuOnly},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
 			test:         "requested resources exceed node capacity",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -366,7 +367,7 @@ func TestLeastRequested(t *testing.T) {
 		{
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "zero node resources, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -394,7 +395,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 		nodes        []api.Node
 		label        string
 		presence     bool
-		expectedList algorithm.HostPriorityList
+		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
@@ -403,7 +404,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
 			label:        "baz",
 			presence:     true,
 			test:         "no match found, presence true",
@@ -414,7 +415,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
 			label:        "baz",
 			presence:     false,
 			test:         "no match found, presence false",
@@ -425,7 +426,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
 			label:        "foo",
 			presence:     true,
 			test:         "one match found, presence true",
@@ -436,7 +437,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
 			label:        "foo",
 			presence:     false,
 			test:         "one match found, presence false",
@@ -447,7 +448,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
 			label:        "bar",
 			presence:     true,
 			test:         "two matches found, presence true",
@@ -458,7 +459,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
 			label:        "bar",
 			presence:     false,
 			test:         "two matches found, presence false",
@@ -549,7 +550,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		pod          *api.Pod
 		pods         []*api.Pod
 		nodes        []api.Node
-		expectedList algorithm.HostPriorityList
+		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
@@ -566,7 +567,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled, nothing requested",
 		},
 		{
@@ -583,7 +584,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 10}},
 			test:         "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
@@ -600,7 +601,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no resources requested, pods scheduled",
 			pods: []*api.Pod{
 				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -623,7 +624,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 6}},
 			test:         "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -646,7 +647,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 9}},
 			test:         "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -667,7 +668,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 6}},
 			test:         "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -688,7 +689,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuOnly},
 			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "requested resources exceed node capacity",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -698,7 +699,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "zero node resources, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},

@@ -21,6 +21,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

 type SelectorSpread struct {
@@ -39,7 +40,7 @@ func NewSelectorSpreadPriority(serviceLister algorithm.ServiceLister, controller
 // CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service or replication controller. It counts number of pods that run under
 // Services or RCs as the pod being scheduled and tries to minimize the number of conflicts. I.e. pushes scheduler towards a Node where there's a smallest number of
 // pods which match the same selectors of Services and RCs as current pod.
-func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
 	var maxCount int
 	var nsPods []*api.Pod

@@ -95,7 +96,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
 		}
 	}

-	result := []algorithm.HostPriority{}
+	result := []schedulerapi.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for _, node := range nodes.Items {
@@ -104,7 +105,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
 		if maxCount > 0 {
 			fScore = 10 * (float32(maxCount-counts[node.Name]) / float32(maxCount))
 		}
-		result = append(result, algorithm.HostPriority{Host: node.Name, Score: int(fScore)})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
 		glog.V(10).Infof(
 			"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, node.Name, int(fScore),
 		)
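The `fScore` expression above is the whole spreading policy: 10 for a node with no matching pods, scaled down linearly to 0 for the node carrying `maxCount` of them. A standalone sketch (illustrative helper name) that reproduces the `{"machine1", 5}, {"machine2", 0}` expectation of the "four pods, three service pods" test below, assuming machine1 carries one matching pod and machine2 the maximum of two:

```go
package main

import "fmt"

// spreadScore mirrors the fScore computation above: nodes hosting fewer pods
// that match the incoming pod's service/RC selectors rank higher.
func spreadScore(matchingPodsOnNode, maxCount int) int {
	fScore := float32(10)
	if maxCount > 0 {
		fScore = 10 * (float32(maxCount-matchingPodsOnNode) / float32(maxCount))
	}
	return int(fScore)
}

func main() {
	fmt.Println(spreadScore(1, 2)) // machine1: 10 * (2-1)/2 = 5
	fmt.Println(spreadScore(2, 2)) // machine2: 10 * (2-2)/2 = 0
	fmt.Println(spreadScore(0, 2)) // an empty node would score 10
}
```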
@@ -128,7 +129,7 @@ func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label
 // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
 // on machines with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
 	var nsServicePods []*api.Pod

 	services, err := s.serviceLister.GetPodServices(pod)
@@ -175,7 +176,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 	}

 	numServicePods := len(nsServicePods)
-	result := []algorithm.HostPriority{}
+	result := []schedulerapi.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for node := range labeledNodes {
@@ -184,11 +185,11 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 		if numServicePods > 0 {
 			fScore = 10 * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
 		}
-		result = append(result, algorithm.HostPriority{Host: node, Score: int(fScore)})
+		result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)})
 	}
 	// add the open nodes with a score of 0
 	for _, node := range otherNodes {
-		result = append(result, algorithm.HostPriority{Host: node, Score: 0})
+		result = append(result, schedulerapi.HostPriority{Host: node, Score: 0})
 	}

 	return result, nil
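`CalculateAntiAffinityPriority` applies the same linear scale, but per label value rather than per node, and the final loop above backfills unlabeled "open" nodes with a score of 0. A sketch against the "four pods, three service pods" zone fixture below (helper name invented; zone1 is assumed to hold one of the three service pods and zone2 two of them, consistent with the expected scores):

```go
package main

import "fmt"

// zoneScore mirrors the fScore above: every node sharing the configured
// label value gets the same score, derived from how many of the service's
// pods already sit in that "zone".
func zoneScore(podsInZone, numServicePods int) int {
	fScore := float32(10)
	if numServicePods > 0 {
		fScore = 10 * (float32(numServicePods-podsInZone) / float32(numServicePods))
	}
	return int(fScore)
}

func main() {
	fmt.Println(zoneScore(1, 3)) // zone1 nodes (machine11, machine12): 10*(3-1)/3 -> 6
	fmt.Println(zoneScore(2, 3)) // zone2 nodes (machine21, machine22): 10*(3-2)/3 -> 3
	// unlabeled nodes (machine01, machine02) are appended with score 0
	// by the separate "open nodes" loop in the hunk above.
}
```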

@@ -23,6 +23,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

 func TestSelectorSpreadPriority(t *testing.T) {
@@ -46,20 +47,20 @@ func TestSelectorSpreadPriority(t *testing.T) {
 		nodes        []string
 		rcs          []api.ReplicationController
 		services     []api.Service
-		expectedList algorithm.HostPriorityList
+		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
 			pod:          new(api.Pod),
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled",
 		},
 		{
 			pod:          &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
 			pods:         []*api.Pod{{Spec: zone1Spec}},
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no services",
 		},
 		{
@@ -67,7 +68,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			pods:         []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "different services",
 		},
 		{
@@ -78,7 +79,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "two pods, one service pod",
 		},
 		{
@@ -92,7 +93,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "five pods, one service pod in no namespace",
 		},
 		{
@@ -105,7 +106,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "four pods, one service pod in default namespace",
 		},
 		{
@@ -119,7 +120,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "five pods, one service pod in specific namespace",
 		},
 		{
@@ -131,7 +132,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "three pods, two service pods on different machines",
 		},
 		{
@@ -144,7 +145,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 0}},
 			test:         "four pods, three service pods",
 		},
 		{
@@ -156,7 +157,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
 			test:         "service with partial pod label matches",
 		},
 		{
@@ -171,7 +172,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rcs:   []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
 			// do spreading between all pods. The result should be exactly as above.
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
 			test:         "service with partial pod label matches with service and replication controller",
 		},
 		{
@@ -185,7 +186,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			rcs:      []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
 			test:         "disjoined service and replication controller should be treated equally",
 		},
 		{
@@ -198,7 +199,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes: []string{"machine1", "machine2"},
 			rcs:   []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			// Both Nodes have one pod from the given RC, hence both get 0 score.
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "Replication controller with partial pod label matches",
 		},
 		{
@@ -210,7 +211,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes: []string{"machine1", "machine2"},
 			rcs:   []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
+			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
 			test:         "Replication controller with partial pod label matches",
 		},
 	}
@@ -264,13 +265,13 @@ func TestZoneSpreadPriority(t *testing.T) {
 		pods         []*api.Pod
 		nodes        map[string]map[string]string
 		services     []api.Service
-		expectedList algorithm.HostPriorityList
+		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
 			pod:   new(api.Pod),
 			nodes: labeledNodes,
-			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "nothing scheduled",
@@ -279,7 +280,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			pod:   &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
 			pods:  []*api.Pod{{Spec: zone1Spec}},
 			nodes: labeledNodes,
-			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "no services",
@@ -289,7 +290,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			pods:     []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "different services",
@@ -303,7 +304,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 0}, {"machine22", 0},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three pods, one service pod",
@@ -317,7 +318,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 5}, {"machine12", 5},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 5}, {"machine12", 5},
 				{"machine21", 5}, {"machine22", 5},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three pods, two service pods on different machines",
@@ -332,7 +333,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 0}, {"machine12", 0},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 0}, {"machine12", 0},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three service label match pods in different namespaces",
@@ -347,7 +348,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 6}, {"machine12", 6},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 6}, {"machine12", 6},
 				{"machine21", 3}, {"machine22", 3},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "four pods, three service pods",
@@ -361,7 +362,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 3}, {"machine12", 3},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 3}, {"machine12", 3},
 				{"machine21", 6}, {"machine22", 6},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "service with partial pod label matches",
@@ -376,7 +377,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
+			expectedList: []schedulerapi.HostPriority{{"machine11", 7}, {"machine12", 7},
 				{"machine21", 5}, {"machine22", 5},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "service pod on non-zoned node",

@@ -18,9 +18,24 @@ package algorithm

 import (
 	"k8s.io/kubernetes/pkg/api"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

-// Scheduler is an interface implemented by things that know how to schedule pods
+// SchedulerExtender is an interface for external processes to influence scheduling
+// decisions made by Kubernetes. This is typically needed for resources not directly
+// managed by Kubernetes.
+type SchedulerExtender interface {
+	// Filter based on extender-implemented predicate functions. The filtered list is
+	// expected to be a subset of the supplied list.
+	Filter(pod *api.Pod, nodes *api.NodeList) (filteredNodes *api.NodeList, err error)
+
+	// Prioritize based on extender-implemented priority functions. The returned scores & weight
+	// are used to compute the weighted score for an extender. The weighted scores are added to
+	// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
+	Prioritize(pod *api.Pod, nodes *api.NodeList) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
+}
+
+// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods
 // onto machines.
 type ScheduleAlgorithm interface {
 	Schedule(*api.Pod, NodeLister) (selectedMachine string, err error)
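This interface is the core of the change: an extender gets a veto during filtering and a vote during prioritization. A hypothetical implementation, with everything invented for illustration (the `gpuExtender` type, the `gpu` node label, the fixed score), might look like:

```go
package extender

import (
	"k8s.io/kubernetes/pkg/api"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

// gpuExtender keeps only nodes carrying a (hypothetical) "gpu" label, then
// gives every survivor the same score; the scheduler combines that score
// with its own priorities using the returned weight.
type gpuExtender struct {
	weight int
}

// Filter returns a subset of the supplied list, as the contract requires.
func (e *gpuExtender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, error) {
	filtered := &api.NodeList{}
	for _, node := range nodes.Items {
		if _, ok := node.Labels["gpu"]; ok {
			filtered.Items = append(filtered.Items, node)
		}
	}
	return filtered, nil
}

// Prioritize returns per-host scores plus the weight used to fold them into
// the scheduler's own totals.
func (e *gpuExtender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, int, error) {
	priorities := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		priorities = append(priorities, schedulerapi.HostPriority{Host: node.Name, Score: 10})
	}
	return &priorities, e.weight, nil
}
```

An empty slice of these, `[]algorithm.SchedulerExtender{}`, is exactly what the updated `PrioritizeNodes` call in the test hunk above now passes.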
@@ -18,35 +18,13 @@ package algorithm

 import (
 	"k8s.io/kubernetes/pkg/api"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 )

 // FitPredicate is a function that indicates if a pod fits into an existing node.
 type FitPredicate func(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error)

-// HostPriority represents the priority of scheduling to a particular host, lower priority is better.
-type HostPriority struct {
-	Host  string
-	Score int
-}
-
-type HostPriorityList []HostPriority
-
-func (h HostPriorityList) Len() int {
-	return len(h)
-}
-
-func (h HostPriorityList) Less(i, j int) bool {
-	if h[i].Score == h[j].Score {
-		return h[i].Host < h[j].Host
-	}
-	return h[i].Score < h[j].Score
-}
-
-func (h HostPriorityList) Swap(i, j int) {
-	h[i], h[j] = h[j], h[i]
-}
-
-type PriorityFunction func(pod *api.Pod, podLister PodLister, nodeLister NodeLister) (HostPriorityList, error)
+type PriorityFunction func(pod *api.Pod, podLister PodLister, nodeLister NodeLister) (schedulerapi.HostPriorityList, error)

 type PriorityConfig struct {
 	Function PriorityFunction
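With `HostPriority` and `HostPriorityList` moved out to `schedulerapi`, existing priority functions only need their return types updated, as the diff does throughout. A sketch of a trivial function satisfying the new `PriorityFunction` type (the equal-scoring behavior here is illustrative):

```go
package priorities

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

// EqualPriority gives every node the same score; only its signature had to
// change to keep satisfying algorithm.PriorityFunction after this commit.
func EqualPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return schedulerapi.HostPriorityList{}, err
	}
	result := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 1})
	}
	return result, nil
}
```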