Merge pull request #7955 from hurf/refactor_scheduler

Move pkg/scheduler to plugin/pkg/scheduler
Rohit Jnagal 2015-05-14 10:28:52 -07:00
commit 532f6fdcef
22 changed files with 263 additions and 238 deletions

View File

@ -48,7 +48,6 @@ import (
kubeletTypes "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
utilErrors "github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
@ -56,6 +55,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"github.com/golang/glog"
cadvisorApi "github.com/google/cadvisor/info/v1"
)
@ -1354,7 +1354,7 @@ func (kl *Kubelet) checkCapacityExceeded(pods []*api.Pod) (fitting []*api.Pod, n
sort.Sort(podsByCreationTime(pods))
capacity := CapacityFromMachineInfo(info)
return scheduler.CheckPodsExceedingCapacity(pods, capacity)
return predicates.CheckPodsExceedingCapacity(pods, capacity)
}
// handleOutOfDisk detects if pods can't fit due to lack of disk space.
@ -1403,7 +1403,7 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Po
return pods, nil
}
for _, pod := range pods {
if !scheduler.PodMatchesNodeLabels(pod, node) {
if !predicates.PodMatchesNodeLabels(pod, node) {
notFitting = append(notFitting, pod)
continue
}
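For the kubelet, this move is purely an import-path change: the capacity and node-selector checks keep their shape but now come from plugin/pkg/scheduler/algorithm/predicates. A minimal sketch of the updated call sites; the capacity type (api.ResourceList, as produced by CapacityFromMachineInfo), the *api.Node parameter, and the two-slice return of CheckPodsExceedingCapacity are assumptions taken from the truncated signatures in the hunk above.

```go
package example

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	// moved here from pkg/scheduler by this PR
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

// splitByCapacity lets the shared predicate decide which pods exceed the
// node's capacity; the kubelet sorts pods oldest-first before calling it.
func splitByCapacity(pods []*api.Pod, capacity api.ResourceList) (fitting, notFitting []*api.Pod) {
	return predicates.CheckPodsExceedingCapacity(pods, capacity)
}

// splitBySelector partitions pods on whether their node selector matches the
// node's labels, using the relocated PodMatchesNodeLabels helper.
func splitBySelector(pods []*api.Pod, node *api.Node) (fitting, notFitting []*api.Pod) {
	for _, pod := range pods {
		if predicates.PodMatchesNodeLabels(pod, node) {
			fitting = append(fitting, pod)
		} else {
			notFitting = append(notFitting, pod)
		}
	}
	return fitting, notFitting
}
```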

View File

@ -18,7 +18,7 @@ package registrytest
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
type Scheduler struct {
@ -27,7 +27,7 @@ type Scheduler struct {
Machine string
}
func (s *Scheduler) Schedule(pod *api.Pod, lister scheduler.MinionLister) (string, error) {
func (s *Scheduler) Schedule(pod *api.Pod, lister algorithm.MinionLister) (string, error) {
s.Pod = pod
return s.Machine, s.Err
}

View File

@ -16,4 +16,4 @@ limitations under the License.
// Package scheduler contains a generic Scheduler interface and several
// implementations.
package scheduler
package algorithm

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package algorithm
import (
"fmt"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package predicates
import (
"fmt"
@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
type NodeInfo interface {
@ -154,14 +155,14 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
return true, nil
}
func NewResourceFitPredicate(info NodeInfo) FitPredicate {
func NewResourceFitPredicate(info NodeInfo) algorithm.FitPredicate {
fit := &ResourceFit{
info: info,
}
return fit.PodFitsResources
}
func NewSelectorMatchPredicate(info NodeInfo) FitPredicate {
func NewSelectorMatchPredicate(info NodeInfo) algorithm.FitPredicate {
selector := &NodeSelector{
info: info,
}
@ -201,7 +202,7 @@ type NodeLabelChecker struct {
presence bool
}
func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) FitPredicate {
func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) algorithm.FitPredicate {
labelChecker := &NodeLabelChecker{
info: info,
labels: labels,
@ -239,13 +240,13 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*
}
type ServiceAffinity struct {
podLister PodLister
serviceLister ServiceLister
podLister algorithm.PodLister
serviceLister algorithm.ServiceLister
nodeInfo NodeInfo
labels []string
}
func NewServiceAffinityPredicate(podLister PodLister, serviceLister ServiceLister, nodeInfo NodeInfo, labels []string) FitPredicate {
func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) algorithm.FitPredicate {
affinity := &ServiceAffinity{
podLister: podLister,
serviceLister: serviceLister,
@ -361,7 +362,7 @@ func getUsedPorts(pods ...*api.Pod) map[int]bool {
// MapPodsToMachines obtains a list of pods and pivots that list into a map where the keys are host names
// and the values are the list of pods running on that host.
func MapPodsToMachines(lister PodLister) (map[string][]*api.Pod, error) {
func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error) {
machineToPods := map[string][]*api.Pod{}
// TODO: perform more targeted query...
pods, err := lister.List(labels.Everything())
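MapPodsToMachines, described in the comment above, is a straightforward pivot of the cluster-wide pod list into a host-indexed map. A minimal sketch of that pivot, assuming PodLister.List returns []*api.Pod (as the fake listers in the test files suggest) and that the binding is recorded on Spec.Host in this release:

```go
package example

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

// mapPodsToMachines pivots the pod list into a map keyed by host name, so a
// predicate can ask "what already runs on this minion?" without re-listing.
// Unbound pods (empty Spec.Host) collect under the "" key.
func mapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error) {
	machineToPods := map[string][]*api.Pod{}
	pods, err := lister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	for _, scheduledPod := range pods {
		host := scheduledPod.Spec.Host
		machineToPods[host] = append(machineToPods[host], scheduledPod)
	}
	return machineToPods, nil
}
```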

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package predicates
import (
"fmt"
@ -23,6 +23,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
type FakeNodeInfo api.Node
@ -179,6 +180,23 @@ func TestPodFitsHost(t *testing.T) {
}
}
func newPod(host string, hostPorts ...int) *api.Pod {
networkPorts := []api.ContainerPort{}
for _, port := range hostPorts {
networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
}
return &api.Pod{
Spec: api.PodSpec{
Host: host,
Containers: []api.Container{
{
Ports: networkPorts,
},
},
},
}
}
func TestPodFitsPorts(t *testing.T) {
tests := []struct {
pod *api.Pod
@ -641,7 +659,7 @@ func TestServiceAffinity(t *testing.T) {
for _, test := range tests {
nodes := []api.Node{node1, node2, node3, node4, node5}
serviceAffinity := ServiceAffinity{FakePodLister(test.pods), FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node)
if err != nil {
t.Errorf("unexpected error: %v", err)

View File

@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package priorities
import (
"math"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"github.com/golang/glog"
)
@ -39,7 +41,7 @@ func calculateScore(requested, capacity int64, node string) int {
// Calculate the occupancy on a node. 'node' has information about the resources on the node.
// 'pods' is a list of pods currently scheduled on the node.
func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
totalMilliCPU := int64(0)
totalMemory := int64(0)
for _, existingPod := range pods {
@ -68,9 +70,9 @@ func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriori
cpuScore, memoryScore,
)
return HostPriority{
host: node.Name,
score: int((cpuScore + memoryScore) / 2),
return algorithm.HostPriority{
Host: node.Name,
Score: int((cpuScore + memoryScore) / 2),
}
}
@ -78,14 +80,14 @@ func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriori
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the minimum of the average of the fraction of requested to capacity.
// Details: (Sum(requested cpu) / Capacity + Sum(requested memory) / Capacity) * 50
func LeastRequestedPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List()
if err != nil {
return HostPriorityList{}, err
return algorithm.HostPriorityList{}, err
}
podsToMachines, err := MapPodsToMachines(podLister)
podsToMachines, err := predicates.MapPodsToMachines(podLister)
list := HostPriorityList{}
list := algorithm.HostPriorityList{}
for _, node := range nodes.Items {
list = append(list, calculateOccupancy(pod, node, podsToMachines[node.Name]))
}
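calculateScore's body is outside this hunk; the sketch below assumes it maps usage onto a 0-10 scale as ((capacity - requested) * 10) / capacity, returning 0 for a full or zero-capacity node, and it drops the node-name argument the real helper takes for logging. On that assumption, a node's LeastRequestedPriority score is simply the mean of its CPU and memory scores:

```go
package main

import "fmt"

// calculateScore: 10 for an idle node, 0 for a fully requested one
// (assumed implementation, see the note above).
func calculateScore(requested, capacity int64) int {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return int(((capacity - requested) * 10) / capacity)
}

func main() {
	// A 4000-milliCPU / 10000-memory node whose pods already request
	// 3000 milliCPU and 5000 memory in total:
	cpuScore := calculateScore(3000, 4000)  // (1000*10)/4000 = 2
	memScore := calculateScore(5000, 10000) // (5000*10)/10000 = 5
	fmt.Println((cpuScore + memScore) / 2)  // LeastRequestedPriority: 3
}
```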
@ -97,7 +99,7 @@ type NodeLabelPrioritizer struct {
presence bool
}
func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunction {
labelPrioritizer := &NodeLabelPrioritizer{
label: label,
presence: presence,
@ -108,7 +110,7 @@ func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
// If presence is true, prioritizes minions that have the specified label, regardless of value.
// If presence is false, prioritizes minions that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
var score int
minions, err := minionLister.List()
if err != nil {
@ -121,7 +123,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
}
result := []HostPriority{}
result := []algorithm.HostPriority{}
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
for minionName, success := range labeledMinions {
@ -130,7 +132,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
} else {
score = 0
}
result = append(result, HostPriority{host: minionName, score: score})
result = append(result, algorithm.HostPriority{Host: minionName, Score: score})
}
return result, nil
}
@ -141,21 +143,21 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List()
if err != nil {
return HostPriorityList{}, err
return algorithm.HostPriorityList{}, err
}
podsToMachines, err := MapPodsToMachines(podLister)
podsToMachines, err := predicates.MapPodsToMachines(podLister)
list := HostPriorityList{}
list := algorithm.HostPriorityList{}
for _, node := range nodes.Items {
list = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))
}
return list, nil
}
func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
totalMilliCPU := int64(0)
totalMemory := int64(0)
score := int(0)
@ -196,9 +198,9 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
score,
)
return HostPriority{
host: node.Name,
score: score,
return algorithm.HostPriority{
Host: node.Name,
Score: score,
}
}
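BalancedResourceAllocation scores a node by how close its CPU and memory utilization fractions are to each other, per the comment above: score = 10 - abs(cpuFraction-memoryFraction)*10. A self-contained sketch of just that formula, assuming (as the over-capacity test cases elsewhere in this diff suggest) that a node whose requests reach or exceed capacity scores 0:

```go
package main

import (
	"fmt"
	"math"
)

// balancedScore is 10 when CPU and memory are equally utilized and falls
// toward 0 as the two fractions diverge or when either resource is exhausted.
func balancedScore(requestedCPU, capacityCPU, requestedMem, capacityMem int64) int {
	if capacityCPU == 0 || capacityMem == 0 {
		return 0
	}
	cpuFraction := float64(requestedCPU) / float64(capacityCPU)
	memFraction := float64(requestedMem) / float64(capacityMem)
	if cpuFraction >= 1 || memFraction >= 1 {
		return 0
	}
	return int(10 - math.Abs(cpuFraction-memFraction)*10)
}

func main() {
	fmt.Println(balancedScore(3000, 6000, 5000, 10000)) // 0.50 vs 0.50 -> 10
	fmt.Println(balancedScore(3000, 4000, 5000, 10000)) // 0.75 vs 0.50 -> 7
}
```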

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package priorities
import (
"reflect"
@ -23,6 +23,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
func makeMinion(node string, milliCPU, memory int64) api.Node {
@ -101,7 +102,7 @@ func TestLeastRequested(t *testing.T) {
pod *api.Pod
pods []*api.Pod
nodes []api.Node
expectedList HostPriorityList
expectedList algorithm.HostPriorityList
test string
}{
{
@ -118,7 +119,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
},
{
@ -135,7 +136,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}},
expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
@ -152,7 +153,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -175,7 +176,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}},
expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -198,7 +199,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -219,7 +220,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -240,7 +241,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuOnly},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
test: "requested resources exceed minion capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -250,7 +251,7 @@ func TestLeastRequested(t *testing.T) {
{
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "zero minion resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -260,7 +261,7 @@ func TestLeastRequested(t *testing.T) {
}
for _, test := range tests {
list, err := LeastRequestedPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -278,7 +279,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
nodes []api.Node
label string
presence bool
expectedList HostPriorityList
expectedList algorithm.HostPriorityList
test string
}{
{
@ -287,7 +288,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
label: "baz",
presence: true,
test: "no match found, presence true",
@ -298,7 +299,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
label: "baz",
presence: false,
test: "no match found, presence false",
@ -309,7 +310,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
label: "foo",
presence: true,
test: "one match found, presence true",
@ -320,7 +321,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
label: "foo",
presence: false,
test: "one match found, presence false",
@ -331,7 +332,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
label: "bar",
presence: true,
test: "two matches found, presence true",
@ -342,7 +343,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
label: "bar",
presence: false,
test: "two matches found, presence false",
@ -354,7 +355,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
label: test.label,
presence: test.presence,
}
list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, FakeMinionLister(api.NodeList{Items: test.nodes}))
list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -431,7 +432,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
pod *api.Pod
pods []*api.Pod
nodes []api.Node
expectedList HostPriorityList
expectedList algorithm.HostPriorityList
test string
}{
{
@ -448,7 +449,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
},
{
@ -465,7 +466,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
expectedList: []HostPriority{{"machine1", 7}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
@ -482,7 +483,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -505,7 +506,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 4}, {"machine2", 6}},
expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@ -528,7 +529,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
expectedList: []HostPriority{{"machine1", 6}, {"machine2", 9}},
expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -549,7 +550,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
expectedList: []HostPriority{{"machine1", 6}, {"machine2", 6}},
expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -570,7 +571,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &api.Pod{Spec: cpuOnly},
nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "requested resources exceed minion capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -580,7 +581,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
{
pod: &api.Pod{Spec: noResources},
nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "zero minion resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
@ -590,7 +591,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
}
for _, test := range tests {
list, err := BalancedResourceAllocation(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}

View File

@ -14,18 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package priorities
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
type ServiceSpread struct {
serviceLister ServiceLister
serviceLister algorithm.ServiceLister
}
func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
func NewServiceSpreadPriority(serviceLister algorithm.ServiceLister) algorithm.PriorityFunction {
serviceSpread := &ServiceSpread{
serviceLister: serviceLister,
}
@ -34,7 +35,7 @@ func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
var maxCount int
var nsServicePods []*api.Pod
@ -71,7 +72,7 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodListe
}
}
result := []HostPriority{}
result := []algorithm.HostPriority{}
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
for _, minion := range minions.Items {
@ -80,17 +81,17 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodListe
if maxCount > 0 {
fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
}
result = append(result, HostPriority{host: minion.Name, score: int(fScore)})
result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
}
return result, nil
}
type ServiceAntiAffinity struct {
serviceLister ServiceLister
serviceLister algorithm.ServiceLister
label string
}
func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) PriorityFunction {
func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label string) algorithm.PriorityFunction {
antiAffinity := &ServiceAntiAffinity{
serviceLister: serviceLister,
label: label,
@ -101,7 +102,7 @@ func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) P
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
var nsServicePods []*api.Pod
services, err := s.serviceLister.GetPodServices(pod)
@ -148,7 +149,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
}
numServicePods := len(nsServicePods)
result := []HostPriority{}
result := []algorithm.HostPriority{}
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
for minion := range labeledMinions {
@ -157,11 +158,11 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
if numServicePods > 0 {
fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
}
result = append(result, HostPriority{host: minion, score: int(fScore)})
result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)})
}
// add the open minions with a score of 0
for _, minion := range otherMinions {
result = append(result, HostPriority{host: minion, score: 0})
result = append(result, algorithm.HostPriority{Host: minion, Score: 0})
}
return result, nil
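The spreading score above is purely relative: the minion carrying the most pods of the same service gets 0, an empty minion gets 10, and everything in between scales linearly (fScore = 10 * (maxCount - count) / maxCount). A small sketch of that scaling in isolation:

```go
package main

import "fmt"

// spreadScores turns per-minion counts of same-service pods into 0-10
// priorities: fewer colocated service pods means a higher score.
func spreadScores(counts map[string]int, minions []string) map[string]int {
	maxCount := 0
	for _, name := range minions {
		if counts[name] > maxCount {
			maxCount = counts[name]
		}
	}
	scores := map[string]int{}
	for _, name := range minions {
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[name]) / float32(maxCount))
		}
		scores[name] = int(fScore)
	}
	return scores
}

func main() {
	// With a maximum of two matching pods on any minion:
	// 2 pods -> 0, 1 pod -> 5, none -> 10.
	fmt.Println(spreadScores(
		map[string]int{"machine1": 2, "machine2": 1},
		[]string{"machine1", "machine2", "machine3"},
	))
}
```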

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package priorities
import (
"reflect"
@ -22,6 +22,7 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
func TestServiceSpreadPriority(t *testing.T) {
@ -44,20 +45,20 @@ func TestServiceSpreadPriority(t *testing.T) {
pods []*api.Pod
nodes []string
services []api.Service
expectedList HostPriorityList
expectedList algorithm.HostPriorityList
test string
}{
{
pod: new(api.Pod),
nodes: []string{"machine1", "machine2"},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{{Spec: zone1Spec}},
nodes: []string{"machine1", "machine2"},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no services",
},
{
@ -65,7 +66,7 @@ func TestServiceSpreadPriority(t *testing.T) {
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "different services",
},
{
@ -76,7 +77,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
test: "two pods, one service pod",
},
{
@ -90,7 +91,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
test: "five pods, one service pod in no namespace",
},
{
@ -103,7 +104,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
test: "four pods, one service pod in default namespace",
},
{
@ -117,7 +118,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
test: "five pods, one service pod in specific namespace",
},
{
@ -129,7 +130,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "three pods, two service pods on different machines",
},
{
@ -142,7 +143,7 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 0}},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 0}},
test: "four pods, three service pods",
},
{
@ -154,14 +155,14 @@ func TestServiceSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 5}},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "service with partial pod label matches",
},
}
for _, test := range tests {
serviceSpread := ServiceSpread{serviceLister: FakeServiceLister(test.services)}
list, err := serviceSpread.CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeNodeList(test.nodes)))
serviceSpread := ServiceSpread{serviceLister: algorithm.FakeServiceLister(test.services)}
list, err := serviceSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes)))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -208,13 +209,13 @@ func TestZoneSpreadPriority(t *testing.T) {
pods []*api.Pod
nodes map[string]map[string]string
services []api.Service
expectedList HostPriorityList
expectedList algorithm.HostPriorityList
test string
}{
{
pod: new(api.Pod),
nodes: labeledNodes,
expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
test: "nothing scheduled",
@ -223,7 +224,7 @@ func TestZoneSpreadPriority(t *testing.T) {
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{{Spec: zone1Spec}},
nodes: labeledNodes,
expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
test: "no services",
@ -233,7 +234,7 @@ func TestZoneSpreadPriority(t *testing.T) {
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
test: "different services",
@ -247,7 +248,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
{"machine21", 0}, {"machine22", 0},
{"machine01", 0}, {"machine02", 0}},
test: "three pods, one service pod",
@ -261,7 +262,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine11", 5}, {"machine12", 5},
expectedList: []algorithm.HostPriority{{"machine11", 5}, {"machine12", 5},
{"machine21", 5}, {"machine22", 5},
{"machine01", 0}, {"machine02", 0}},
test: "three pods, two service pods on different machines",
@ -276,7 +277,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
expectedList: []HostPriority{{"machine11", 0}, {"machine12", 0},
expectedList: []algorithm.HostPriority{{"machine11", 0}, {"machine12", 0},
{"machine21", 10}, {"machine22", 10},
{"machine01", 0}, {"machine02", 0}},
test: "three service label match pods in different namespaces",
@ -291,7 +292,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine11", 6}, {"machine12", 6},
expectedList: []algorithm.HostPriority{{"machine11", 6}, {"machine12", 6},
{"machine21", 3}, {"machine22", 3},
{"machine01", 0}, {"machine02", 0}},
test: "four pods, three service pods",
@ -305,7 +306,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []HostPriority{{"machine11", 3}, {"machine12", 3},
expectedList: []algorithm.HostPriority{{"machine11", 3}, {"machine12", 3},
{"machine21", 6}, {"machine22", 6},
{"machine01", 0}, {"machine02", 0}},
test: "service with partial pod label matches",
@ -320,7 +321,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine11", 7}, {"machine12", 7},
expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
{"machine21", 5}, {"machine22", 5},
{"machine01", 0}, {"machine02", 0}},
test: "service pod on non-zoned minion",
@ -328,8 +329,8 @@ func TestZoneSpreadPriority(t *testing.T) {
}
for _, test := range tests {
zoneSpread := ServiceAntiAffinity{serviceLister: FakeServiceLister(test.services), label: "zone"}
list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeLabeledMinionList(test.nodes)))
zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes)))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -349,3 +350,13 @@ func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.Nod
}
return api.NodeList{Items: nodes}
}
func makeNodeList(nodeNames []string) api.NodeList {
result := api.NodeList{
Items: make([]api.Node, len(nodeNames)),
}
for ix := range nodeNames {
result.Items[ix].Name = nodeNames[ix]
}
return result
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package algorithm
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -22,6 +22,6 @@ import (
// Scheduler is an interface implemented by things that know how to schedule pods
// onto machines.
type Scheduler interface {
type ScheduleAlgorithm interface {
Schedule(*api.Pod, MinionLister) (selectedMachine string, err error)
}
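The rename from Scheduler to ScheduleAlgorithm keeps the single Schedule method, so an implementation only changes the interface name it satisfies and the import path. A toy implementation of the renamed interface, assuming MinionLister.List() returns an api.NodeList as it does elsewhere in this diff:

```go
package example

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

// firstFitScheduler is a deliberately naive algorithm.ScheduleAlgorithm that
// always picks the first listed minion; the generic scheduler later in this
// diff filters with predicates and ranks with priority functions instead.
type firstFitScheduler struct{}

func (s *firstFitScheduler) Schedule(pod *api.Pod, lister algorithm.MinionLister) (string, error) {
	minions, err := lister.List()
	if err != nil {
		return "", err
	}
	if len(minions.Items) == 0 {
		return "", fmt.Errorf("no minions available to schedule pod %s", pod.Name)
	}
	return minions.Items[0].Name, nil
}
```

A compile-time assertion such as `var _ algorithm.ScheduleAlgorithm = &firstFitScheduler{}` is a cheap way for downstream code to catch the rename early.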

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package algorithm
import (
"testing"
@ -26,7 +26,7 @@ import (
type schedulerTester struct {
t *testing.T
scheduler Scheduler
scheduler ScheduleAlgorithm
minionLister MinionLister
}
@ -58,20 +58,3 @@ func (st *schedulerTester) expectFailure(pod *api.Pod) {
st.t.Error("Unexpected non-error")
}
}
func newPod(host string, hostPorts ...int) *api.Pod {
networkPorts := []api.ContainerPort{}
for _, port := range hostPorts {
networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
}
return &api.Pod{
Spec: api.PodSpec{
Host: host,
Containers: []api.Container{
{
Ports: networkPorts,
},
},
},
}
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
package algorithm
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -25,8 +25,8 @@ type FitPredicate func(pod *api.Pod, existingPods []*api.Pod, node string) (bool
// HostPriority represents the priority of scheduling to a particular host, lower priority is better.
type HostPriority struct {
host string
score int
Host string
Score int
}
type HostPriorityList []HostPriority
@ -36,10 +36,10 @@ func (h HostPriorityList) Len() int {
}
func (h HostPriorityList) Less(i, j int) bool {
if h[i].score == h[j].score {
return h[i].host < h[j].host
if h[i].Score == h[j].Score {
return h[i].Host < h[j].Host
}
return h[i].score < h[j].score
return h[i].Score < h[j].Score
}
func (h HostPriorityList) Swap(i, j int) {
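Exporting host and score (now Host and Score) is what lets the new priorities and plugin/pkg/scheduler packages construct and read HostPriority values across package boundaries. A small usage sketch of the sort order defined above:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
	hosts := algorithm.HostPriorityList{
		{Host: "machine2", Score: 5},
		{Host: "machine3", Score: 9},
		{Host: "machine1", Score: 5},
	}
	// Less orders by Score and breaks ties by Host name, so an ascending sort
	// yields machine1(5), machine2(5), machine3(9); the generic scheduler
	// sorts this list in reverse to put the best-scoring hosts first.
	sort.Sort(hosts)
	fmt.Println(hosts)
}
```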

View File

@ -18,8 +18,11 @@ limitations under the License.
package defaults
import (
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)
@ -28,46 +31,46 @@ func init() {
// EqualPriority is a prioritizer function that gives an equal weight of one to all minions
// Register the priority function so that its available
// but do not include it as part of the default priorities
factory.RegisterPriorityFunction("EqualPriority", algorithm.EqualPriority, 1)
factory.RegisterPriorityFunction("EqualPriority", scheduler.EqualPriority, 1)
}
func defaultPredicates() util.StringSet {
return util.NewStringSet(
// Fit is defined based on the absence of port conflicts.
factory.RegisterFitPredicate("PodFitsPorts", algorithm.PodFitsPorts),
factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsPorts),
// Fit is determined by resource availability.
factory.RegisterFitPredicateFactory(
"PodFitsResources",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
return algorithm.NewResourceFitPredicate(args.NodeInfo)
return predicates.NewResourceFitPredicate(args.NodeInfo)
},
),
// Fit is determined by non-conflicting disk volumes.
factory.RegisterFitPredicate("NoDiskConflict", algorithm.NoDiskConflict),
factory.RegisterFitPredicate("NoDiskConflict", predicates.NoDiskConflict),
// Fit is determined by node selector query.
factory.RegisterFitPredicateFactory(
"MatchNodeSelector",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
return algorithm.NewSelectorMatchPredicate(args.NodeInfo)
return predicates.NewSelectorMatchPredicate(args.NodeInfo)
},
),
// Fit is determined by the presence of the Host parameter and a string match
factory.RegisterFitPredicate("HostName", algorithm.PodFitsHost),
factory.RegisterFitPredicate("HostName", predicates.PodFitsHost),
)
}
func defaultPriorities() util.StringSet {
return util.NewStringSet(
// Prioritize nodes by least requested utilization.
factory.RegisterPriorityFunction("LeastRequestedPriority", algorithm.LeastRequestedPriority, 1),
factory.RegisterPriorityFunction("LeastRequestedPriority", priorities.LeastRequestedPriority, 1),
// Prioritizes nodes to help achieve balanced resource usage
factory.RegisterPriorityFunction("BalancedResourceAllocation", algorithm.BalancedResourceAllocation, 1),
factory.RegisterPriorityFunction("BalancedResourceAllocation", priorities.BalancedResourceAllocation, 1),
// spreads pods by minimizing the number of pods (belonging to the same service) on the same minion.
factory.RegisterPriorityConfigFactory(
"ServiceSpreadingPriority",
factory.PriorityConfigFactory{
Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
return algorithm.NewServiceSpreadPriority(args.ServiceLister)
return priorities.NewServiceSpreadPriority(args.ServiceLister)
},
Weight: 1,
},
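After the move, defaults.go also serves as the template for wiring extra algorithms into the scheduler: predicates come from .../algorithm/predicates, priorities from .../algorithm/priorities, and both are registered by name through the factory. A hedged sketch of a provider that registers a subset of them, using only the factory calls visible in this hunk (the weight of 2 is an arbitrary choice for illustration):

```go
package custom

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)

// customPredicates wires two of the relocated predicates into the factory,
// exactly as defaultPredicates does above.
func customPredicates() util.StringSet {
	return util.NewStringSet(
		// Plain predicates register directly by name.
		factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsPorts),
		// Predicates that need NodeInfo go through a factory closure so the
		// plumbing is injected at construction time.
		factory.RegisterFitPredicateFactory(
			"MatchNodeSelector",
			func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
				return predicates.NewSelectorMatchPredicate(args.NodeInfo)
			},
		),
	)
}

// customPriorities gives balanced allocation twice the weight of a default
// priority when the weighted scores are combined.
func customPriorities() util.StringSet {
	return util.NewStringSet(
		factory.RegisterPriorityFunction("BalancedResourceAllocation", priorities.BalancedResourceAllocation, 2),
	)
}
```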

View File

@ -28,9 +28,9 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/validation"
@ -182,7 +182,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
r := rand.New(rand.NewSource(time.Now().UnixNano()))
algo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)
algo := scheduler.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)
podBackoff := podBackoff{
perPodBackoff: map[string]*backoffEntry{},

View File

@ -29,8 +29,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
latestschedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/latest"
)

View File

@ -22,8 +22,10 @@ import (
"strings"
"sync"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
"github.com/golang/glog"
@ -34,7 +36,7 @@ type PluginFactoryArgs struct {
algorithm.PodLister
algorithm.ServiceLister
NodeLister algorithm.MinionLister
NodeInfo algorithm.NodeInfo
NodeInfo predicates.NodeInfo
}
// A FitPredicateFactory produces a FitPredicate from the given args.
@ -95,7 +97,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
if policy.Argument != nil {
if policy.Argument.ServiceAffinity != nil {
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
return algorithm.NewServiceAffinityPredicate(
return predicates.NewServiceAffinityPredicate(
args.PodLister,
args.ServiceLister,
args.NodeInfo,
@ -104,7 +106,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
}
} else if policy.Argument.LabelsPresence != nil {
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
return algorithm.NewNodeLabelPredicate(
return predicates.NewNodeLabelPredicate(
args.NodeInfo,
policy.Argument.LabelsPresence.Labels,
policy.Argument.LabelsPresence.Presence,
@ -162,7 +164,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
if policy.Argument.ServiceAntiAffinity != nil {
pcf = &PriorityConfigFactory{
Function: func(args PluginFactoryArgs) algorithm.PriorityFunction {
return algorithm.NewServiceAntiAffinityPriority(
return priorities.NewServiceAntiAffinityPriority(
args.ServiceLister,
policy.Argument.ServiceAntiAffinity.Label,
)
@ -172,7 +174,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
} else if policy.Argument.LabelPreference != nil {
pcf = &PriorityConfigFactory{
Function: func(args PluginFactoryArgs) algorithm.PriorityFunction {
return algorithm.NewNodeLabelPriority(
return priorities.NewNodeLabelPriority(
policy.Argument.LabelPreference.Label,
policy.Argument.LabelPreference.Presence,
)
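RegisterCustomFitPredicate and RegisterCustomPriorityFunction are the hooks that turn a scheduler policy entry into one of the relocated constructors (NewServiceAffinityPredicate, NewNodeLabelPredicate, NewServiceAntiAffinityPriority, NewNodeLabelPriority). A sketch of registering a label-presence predicate that way; the Name field and the PredicateArgument/LabelsPresence type names are assumptions, since the hunk only shows the Argument.LabelsPresence.{Labels,Presence} field path:

```go
package custom

import (
	schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)

// registerSSDOnly registers a hypothetical predicate that only admits minions
// carrying a "disk-ssd" label; per the hunk above, the factory converts the
// policy into predicates.NewNodeLabelPredicate(nodeInfo, labels, presence).
func registerSSDOnly() string {
	return factory.RegisterCustomFitPredicate(schedulerapi.PredicatePolicy{
		Name: "RequireSSD", // assumed field
		Argument: &schedulerapi.PredicateArgument{ // assumed type name
			LabelsPresence: &schedulerapi.LabelsPresence{ // assumed type name
				Labels:   []string{"disk-ssd"},
				Presence: true,
			},
		},
	})
}
```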

View File

@ -25,6 +25,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)
type FailedPredicateMap map[string]util.StringSet
@ -44,14 +46,14 @@ func (f *FitError) Error() string {
}
type genericScheduler struct {
predicates map[string]FitPredicate
prioritizers []PriorityConfig
pods PodLister
predicates map[string]algorithm.FitPredicate
prioritizers []algorithm.PriorityConfig
pods algorithm.PodLister
random *rand.Rand
randomLock sync.Mutex
}
func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (string, error) {
func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionLister) (string, error) {
minions, err := minionLister.List()
if err != nil {
return "", err
@ -65,7 +67,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (st
return "", err
}
priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, FakeMinionLister(filteredNodes))
priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
if err != nil {
return "", err
}
@ -81,7 +83,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (st
// This method takes a prioritized list of minions and sorts them in reverse order based on scores
// and then picks one randomly from the minions that had the highest score
func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, error) {
func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (string, error) {
if len(priorityList) == 0 {
return "", fmt.Errorf("empty priorityList")
}
@ -97,16 +99,16 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er
// Filters the minions to find the ones that fit based on the given predicate functions
// Each minion is passed through the predicate functions to determine if it is a fit
func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFuncs map[string]algorithm.FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
filtered := []api.Node{}
machineToPods, err := MapPodsToMachines(podLister)
machineToPods, err := predicates.MapPodsToMachines(podLister)
failedPredicateMap := FailedPredicateMap{}
if err != nil {
return api.NodeList{}, FailedPredicateMap{}, err
}
for _, node := range nodes.Items {
fits := true
for name, predicate := range predicates {
for name, predicate := range predicateFuncs {
fit, err := predicate(pod, machineToPods[node.Name], node.Name)
if err != nil {
return api.NodeList{}, FailedPredicateMap{}, err
@ -133,8 +135,8 @@ func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]F
// Each priority function can also have its own weight
// The minion scores returned by the priority function are multiplied by the weights to get weighted scores
// All scores are finally combined (added) to get the total weighted scores of all minions
func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
result := HostPriorityList{}
func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
result := algorithm.HostPriorityList{}
// If no priority configs are provided, then the EqualPriority function is applied
// This is required to generate the priority list in the required format
@ -152,23 +154,23 @@ func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []Priori
priorityFunc := priorityConfig.Function
prioritizedList, err := priorityFunc(pod, podLister, minionLister)
if err != nil {
return HostPriorityList{}, err
return algorithm.HostPriorityList{}, err
}
for _, hostEntry := range prioritizedList {
combinedScores[hostEntry.host] += hostEntry.score * weight
combinedScores[hostEntry.Host] += hostEntry.Score * weight
}
}
for host, score := range combinedScores {
result = append(result, HostPriority{host: host, score: score})
result = append(result, algorithm.HostPriority{Host: host, Score: score})
}
return result, nil
}
func getBestHosts(list HostPriorityList) []string {
func getBestHosts(list algorithm.HostPriorityList) []string {
result := []string{}
for _, hostEntry := range list {
if hostEntry.score == list[0].score {
result = append(result, hostEntry.host)
if hostEntry.Score == list[0].Score {
result = append(result, hostEntry.Host)
} else {
break
}
@ -177,24 +179,24 @@ func getBestHosts(list HostPriorityList) []string {
}
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
func EqualPriority(_ *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List()
if err != nil {
fmt.Errorf("failed to list nodes: %v", err)
return []HostPriority{}, err
return []algorithm.HostPriority{}, err
}
result := []HostPriority{}
result := []algorithm.HostPriority{}
for _, minion := range nodes.Items {
result = append(result, HostPriority{
host: minion.Name,
score: 1,
result = append(result, algorithm.HostPriority{
Host: minion.Name,
Score: 1,
})
}
return result, nil
}
func NewGenericScheduler(predicates map[string]FitPredicate, prioritizers []PriorityConfig, pods PodLister, random *rand.Rand) Scheduler {
func NewGenericScheduler(predicates map[string]algorithm.FitPredicate, prioritizers []algorithm.PriorityConfig, pods algorithm.PodLister, random *rand.Rand) algorithm.ScheduleAlgorithm {
return &genericScheduler{
predicates: predicates,
prioritizers: prioritizers,
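The generic scheduler's flow, per the comments above: findNodesThatFit runs every registered predicate against every minion, prioritizeNodes sums each priority function's scores multiplied by its weight, and selectHost reverse-sorts the combined list and picks at random among the hosts tied for the top score. A compact sketch of that final selection step, using the newly exported HostPriority fields:

```go
package example

import (
	"fmt"
	"math/rand"
	"sort"

	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

// pickHost mirrors the selectHost/getBestHosts behavior described above:
// hosts tied for the best combined score are equally eligible, and one is
// chosen at random so repeated ties don't always land on the same minion.
func pickHost(list algorithm.HostPriorityList, r *rand.Rand) (string, error) {
	if len(list) == 0 {
		return "", fmt.Errorf("empty priorityList")
	}
	sort.Sort(sort.Reverse(list)) // best score first
	best := []string{}
	for _, entry := range list {
		if entry.Score != list[0].Score {
			break
		}
		best = append(best, entry.Host)
	}
	return best[r.Intn(len(best))], nil
}
```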

View File

@ -25,6 +25,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)
func falsePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
@ -39,9 +40,9 @@ func matchesPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool,
return pod.Name == node, nil
}
func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func numericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List()
result := []HostPriority{}
result := []algorithm.HostPriority{}
if err != nil {
return nil, fmt.Errorf("failed to list nodes: %v", err)
@ -51,31 +52,31 @@ func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionListe
if err != nil {
return nil, err
}
result = append(result, HostPriority{
host: minion.Name,
score: score,
result = append(result, algorithm.HostPriority{
Host: minion.Name,
Score: score,
})
}
return result, nil
}
func reverseNumericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
var maxScore float64
minScore := math.MaxFloat64
reverseResult := []HostPriority{}
reverseResult := []algorithm.HostPriority{}
result, err := numericPriority(pod, podLister, minionLister)
if err != nil {
return nil, err
}
for _, hostPriority := range result {
maxScore = math.Max(maxScore, float64(hostPriority.score))
minScore = math.Min(minScore, float64(hostPriority.score))
maxScore = math.Max(maxScore, float64(hostPriority.Score))
minScore = math.Min(minScore, float64(hostPriority.Score))
}
for _, hostPriority := range result {
reverseResult = append(reverseResult, HostPriority{
host: hostPriority.host,
score: int(maxScore + minScore - float64(hostPriority.score)),
reverseResult = append(reverseResult, algorithm.HostPriority{
Host: hostPriority.Host,
Score: int(maxScore + minScore - float64(hostPriority.Score)),
})
}
@ -95,44 +96,44 @@ func makeNodeList(nodeNames []string) api.NodeList {
func TestSelectHost(t *testing.T) {
scheduler := genericScheduler{random: rand.New(rand.NewSource(0))}
tests := []struct {
list HostPriorityList
list algorithm.HostPriorityList
possibleHosts util.StringSet
expectsErr bool
}{
{
list: []HostPriority{
{host: "machine1.1", score: 1},
{host: "machine2.1", score: 2},
list: []algorithm.HostPriority{
{Host: "machine1.1", Score: 1},
{Host: "machine2.1", Score: 2},
},
possibleHosts: util.NewStringSet("machine2.1"),
expectsErr: false,
},
// equal scores
{
list: []HostPriority{
{host: "machine1.1", score: 1},
{host: "machine1.2", score: 2},
{host: "machine1.3", score: 2},
{host: "machine2.1", score: 2},
list: []algorithm.HostPriority{
{Host: "machine1.1", Score: 1},
{Host: "machine1.2", Score: 2},
{Host: "machine1.3", Score: 2},
{Host: "machine2.1", Score: 2},
},
possibleHosts: util.NewStringSet("machine1.2", "machine1.3", "machine2.1"),
expectsErr: false,
},
// out of order scores
{
list: []HostPriority{
{host: "machine1.1", score: 3},
{host: "machine1.2", score: 3},
{host: "machine2.1", score: 2},
{host: "machine3.1", score: 1},
{host: "machine1.3", score: 3},
list: []algorithm.HostPriority{
{Host: "machine1.1", Score: 3},
{Host: "machine1.2", Score: 3},
{Host: "machine2.1", Score: 2},
{Host: "machine3.1", Score: 1},
{Host: "machine1.3", Score: 3},
},
possibleHosts: util.NewStringSet("machine1.1", "machine1.2", "machine1.3"),
expectsErr: false,
},
// empty priorityList
{
list: []HostPriority{},
list: []algorithm.HostPriority{},
possibleHosts: util.NewStringSet(),
expectsErr: true,
},
@@ -161,23 +162,23 @@ func TestSelectHost(t *testing.T) {
func TestGenericScheduler(t *testing.T) {
tests := []struct {
name string
predicates map[string]FitPredicate
prioritizers []PriorityConfig
predicates map[string]algorithm.FitPredicate
prioritizers []algorithm.PriorityConfig
nodes []string
pod *api.Pod
expectedHost string
expectsErr bool
}{
{
predicates: map[string]FitPredicate{"false": falsePredicate},
prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
nodes: []string{"machine1", "machine2"},
expectsErr: true,
name: "test 1",
},
{
predicates: map[string]FitPredicate{"true": truePredicate},
prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
nodes: []string{"machine1", "machine2"},
// Random choice between both, the rand seeded above with zero, chooses "machine1"
expectedHost: "machine1",
@@ -185,39 +186,39 @@ func TestGenericScheduler(t *testing.T) {
},
{
// Fits on a machine where the pod ID matches the machine name
predicates: map[string]FitPredicate{"matches": matchesPredicate},
prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
nodes: []string{"machine1", "machine2"},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
expectedHost: "machine2",
name: "test 3",
},
{
predicates: map[string]FitPredicate{"true": truePredicate},
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
expectedHost: "3",
name: "test 4",
},
{
predicates: map[string]FitPredicate{"matches": matchesPredicate},
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
expectedHost: "2",
name: "test 5",
},
{
predicates: map[string]FitPredicate{"true": truePredicate},
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
nodes: []string{"3", "2", "1"},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
expectedHost: "1",
name: "test 6",
},
{
predicates: map[string]FitPredicate{"true": truePredicate, "false": falsePredicate},
prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"},
expectsErr: true,
name: "test 7",
@@ -226,8 +227,8 @@ func TestGenericScheduler(t *testing.T) {
for _, test := range tests {
random := rand.New(rand.NewSource(0))
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]*api.Pod{}), random)
machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeNodeList(test.nodes)))
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister([]*api.Pod{}), random)
machine, err := scheduler.Schedule(test.pod, algorithm.FakeMinionLister(makeNodeList(test.nodes)))
if test.expectsErr {
if err == nil {
t.Error("Unexpected non-error")
@@ -245,8 +246,8 @@ func TestGenericScheduler(t *testing.T) {
func TestFindFitAllError(t *testing.T) {
nodes := []string{"3", "2", "1"}
predicates := map[string]FitPredicate{"true": truePredicate, "false": falsePredicate}
_, predicateMap, err := findNodesThatFit(&api.Pod{}, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}
_, predicateMap, err := findNodesThatFit(&api.Pod{}, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -269,9 +270,9 @@ func TestFindFitAllError(t *testing.T) {
func TestFindFitSomeError(t *testing.T) {
nodes := []string{"3", "2", "1"}
predicates := map[string]FitPredicate{"true": truePredicate, "match": matchesPredicate}
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
_, predicateMap, err := findNodesThatFit(pod, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
_, predicateMap, err := findNodesThatFit(pod, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
if err != nil {
t.Errorf("unexpected error: %v", err)

View File

@@ -25,7 +25,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/golang/glog"
)

View File

@@ -21,9 +21,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
// TODO: move everything from pkg/scheduler into this package. Remove references from registry.
"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/metrics"
"github.com/golang/glog"
@@ -70,8 +69,8 @@ type Config struct {
// It is expected that changes made via modeler will be observed
// by MinionLister and Algorithm.
Modeler SystemModeler
MinionLister scheduler.MinionLister
Algorithm scheduler.Scheduler
MinionLister algorithm.MinionLister
Algorithm algorithm.ScheduleAlgorithm
Binder Binder
// NextPod should be a function that blocks until the next pod
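
The Config fields above now take their interfaces from the new algorithm package, and the (truncated) comment documents that NextPod blocks until a pod is available. A minimal sketch of that blocking contract follows; the Pod type and blockingNextPod helper are local stand-ins for illustration, not Kubernetes code, and nothing here reflects the real NextPod implementation.

package main

import "fmt"

// Pod stands in for *api.Pod so the sketch stays self-contained.
type Pod struct{ Name string }

// blockingNextPod returns a NextPod-style function: each call receives from
// the channel and therefore blocks until a pod has been queued, which is the
// contract the Config comment above describes.
func blockingNextPod(queue <-chan *Pod) func() *Pod {
	return func() *Pod {
		return <-queue
	}
}

func main() {
	queue := make(chan *Pod, 1)
	next := blockingNextPod(queue)

	queue <- &Pod{Name: "nginx"}
	fmt.Println(next().Name) // prints "nginx"; a second call would block until another pod arrives
}
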

View File

@@ -27,8 +27,9 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)
type fakeBinder struct {
@@ -59,7 +60,7 @@ type mockScheduler struct {
err error
}
func (es mockScheduler) Schedule(pod *api.Pod, ml scheduler.MinionLister) (string, error) {
func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.MinionLister) (string, error) {
return es.machine, es.err
}
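
mockScheduler above now satisfies algorithm.ScheduleAlgorithm rather than the old scheduler.Scheduler interface. Assuming ScheduleAlgorithm declares only the Schedule method visible in this diff, a self-contained version of the same pattern looks roughly like this; Pod, MinionLister, and fixedScheduler are local stand-ins for the Kubernetes types.

package main

import "fmt"

// Stand-ins for api.Pod and algorithm.MinionLister so the sketch compiles on its own.
type Pod struct{ Name string }

type MinionLister interface {
	List() ([]string, error)
}

// ScheduleAlgorithm is assumed here to declare only the Schedule method that
// mockScheduler implements in the diff above.
type ScheduleAlgorithm interface {
	Schedule(pod *Pod, ml MinionLister) (string, error)
}

// fixedScheduler mirrors the test's mockScheduler: it ignores its inputs and
// returns a canned machine name and error.
type fixedScheduler struct {
	machine string
	err     error
}

func (s fixedScheduler) Schedule(pod *Pod, ml MinionLister) (string, error) {
	return s.machine, s.err
}

func main() {
	var algo ScheduleAlgorithm = fixedScheduler{machine: "machine1"}
	host, err := algo.Schedule(&Pod{Name: "nginx"}, nil)
	fmt.Println(host, err) // machine1 <nil>
}
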
@@ -72,7 +73,7 @@ func TestScheduler(t *testing.T) {
table := []struct {
injectBindError error
sendPod *api.Pod
algo scheduler.Scheduler
algo algorithm.ScheduleAlgorithm
expectErrorPod *api.Pod
expectAssumedPod *api.Pod
expectError error
@@ -113,7 +114,7 @@ func TestScheduler(t *testing.T) {
gotAssumedPod = pod
},
},
MinionLister: scheduler.FakeMinionLister(
MinionLister: algorithm.FakeMinionLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
),
Algorithm: item.algo,
@@ -186,16 +187,16 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
firstPod := podWithPort("foo", "", podPort)
// Create the scheduler config
algo := scheduler.NewGenericScheduler(
map[string]scheduler.FitPredicate{"PodFitsPorts": scheduler.PodFitsPorts},
[]scheduler.PriorityConfig{},
algo := NewGenericScheduler(
map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
[]algorithm.PriorityConfig{},
modeler.PodLister(),
rand.New(rand.NewSource(time.Now().UnixNano())))
var gotBinding *api.Binding
c := &Config{
Modeler: modeler,
MinionLister: scheduler.FakeMinionLister(
MinionLister: algorithm.FakeMinionLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
),
Algorithm: algo,