plugin/scheduler

Chao Xu
2016-11-18 12:52:35 -08:00
parent 5e1adf91df
commit f782aba56e
55 changed files with 1907 additions and 1906 deletions
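
The diff below touches the plugin/pkg/scheduler test code, and every hunk follows the same pattern: helpers and test fixtures previously written against the internal "k8s.io/kubernetes/pkg/api" types now use the versioned "k8s.io/kubernetes/pkg/api/v1" types (and "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" for ReplicaSet). As a minimal sketch of that pattern, and not code added by the commit itself, a fit predicate after the migration looks like this (the package name is illustrative):

// Illustrative sketch only (not part of the commit): the post-migration shape
// of a scheduler fit predicate, written against the versioned v1 types.
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

// Previously: truePredicate(pod *api.Pod, ...) using "k8s.io/kubernetes/pkg/api".
func truePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	return true, nil, nil
}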


@@ -24,9 +24,9 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
@@ -37,15 +37,15 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
 
-func falsePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func falsePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }
 
-func truePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func truePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	return true, nil, nil
 }
 
-func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func matchesPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -56,14 +56,14 @@ func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 	return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }
 
-func hasNoPodsPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func hasNoPodsPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	if len(nodeInfo.Pods()) == 0 {
 		return true, nil, nil
 	}
 	return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }
 
-func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 	result := []schedulerapi.HostPriority{}
 	for _, node := range nodes {
 		score, err := strconv.Atoi(node.Name)
@@ -78,7 +78,7 @@ func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.Nod
 	return result, nil
 }
 
-func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 	var maxScore float64
 	minScore := math.MaxFloat64
 	reverseResult := []schedulerapi.HostPriority{}
@@ -101,10 +101,10 @@ func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
 	return reverseResult, nil
 }
 
-func makeNodeList(nodeNames []string) []*api.Node {
-	result := make([]*api.Node, 0, len(nodeNames))
+func makeNodeList(nodeNames []string) []*v1.Node {
+	result := make([]*v1.Node, 0, len(nodeNames))
 	for _, nodeName := range nodeNames {
-		result = append(result, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
+		result = append(result, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}})
 	}
 	return result
 }
@@ -181,8 +181,8 @@ func TestGenericScheduler(t *testing.T) {
 		predicates    map[string]algorithm.FitPredicate
 		prioritizers  []algorithm.PriorityConfig
 		nodes         []string
-		pod           *api.Pod
-		pods          []*api.Pod
+		pod           *v1.Pod
+		pods          []*v1.Pod
 		expectedHosts sets.String
 		expectsErr    bool
 		wErr          error
@@ -192,10 +192,10 @@ func TestGenericScheduler(t *testing.T) {
 			prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 			nodes: []string{"machine1", "machine2"},
 			expectsErr: true,
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 			name: "test 1",
 			wErr: &FitError{
-				Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+				Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 				FailedPredicates: FailedPredicateMap{
 					"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -214,7 +214,7 @@ func TestGenericScheduler(t *testing.T) {
 			predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
 			prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 			nodes: []string{"machine1", "machine2"},
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "machine2"}},
 			expectedHosts: sets.NewString("machine2"),
 			name: "test 3",
 			wErr: nil,
@@ -231,7 +231,7 @@ func TestGenericScheduler(t *testing.T) {
 			predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
 			prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 			nodes: []string{"3", "2", "1"},
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 			expectedHosts: sets.NewString("2"),
 			name: "test 5",
 			wErr: nil,
@@ -240,7 +240,7 @@ func TestGenericScheduler(t *testing.T) {
 			predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
 			prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
 			nodes: []string{"3", "2", "1"},
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 			expectedHosts: sets.NewString("1"),
 			name: "test 6",
 			wErr: nil,
@@ -249,11 +249,11 @@ func TestGenericScheduler(t *testing.T) {
 			predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
 			prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 			nodes: []string{"3", "2", "1"},
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 			expectsErr: true,
 			name: "test 7",
 			wErr: &FitError{
-				Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+				Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 				FailedPredicates: FailedPredicateMap{
 					"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -266,24 +266,24 @@ func TestGenericScheduler(t *testing.T) {
 				"nopods":  hasNoPodsPredicate,
 				"matches": matchesPredicate,
 			},
-			pods: []*api.Pod{
+			pods: []*v1.Pod{
 				{
-					ObjectMeta: api.ObjectMeta{Name: "2"},
-					Spec: api.PodSpec{
+					ObjectMeta: v1.ObjectMeta{Name: "2"},
+					Spec: v1.PodSpec{
 						NodeName: "2",
 					},
-					Status: api.PodStatus{
-						Phase: api.PodRunning,
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
 					},
 				},
 			},
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 			prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 			nodes: []string{"1", "2"},
 			expectsErr: true,
 			name: "test 8",
 			wErr: &FitError{
-				Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
+				Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
 				FailedPredicates: FailedPredicateMap{
 					"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -297,7 +297,7 @@ func TestGenericScheduler(t *testing.T) {
 			cache.AddPod(pod)
 		}
 		for _, name := range test.nodes {
-			cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
+			cache.AddNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}})
 		}
 
 		scheduler := NewGenericScheduler(
@@ -322,7 +322,7 @@ func TestFindFitAllError(t *testing.T) {
 		"2": schedulercache.NewNodeInfo(),
 		"1": schedulercache.NewNodeInfo(),
 	}
-	_, predicateMap, err := findNodesThatFit(&api.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
+	_, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
 
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -346,14 +346,14 @@ func TestFindFitAllError(t *testing.T) {
 func TestFindFitSomeError(t *testing.T) {
 	nodes := []string{"3", "2", "1"}
 	predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
-	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
+	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "1"}}
 	nodeNameToInfo := map[string]*schedulercache.NodeInfo{
 		"3": schedulercache.NewNodeInfo(),
 		"2": schedulercache.NewNodeInfo(),
 		"1": schedulercache.NewNodeInfo(pod),
 	}
 	for name := range nodeNameToInfo {
-		nodeNameToInfo[name].SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
+		nodeNameToInfo[name].SetNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}})
 	}
 
 	_, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
@@ -379,15 +379,15 @@ func TestFindFitSomeError(t *testing.T) {
 	}
 }
 
-func makeNode(node string, milliCPU, memory int64) *api.Node {
-	return &api.Node{
-		ObjectMeta: api.ObjectMeta{Name: node},
-		Status: api.NodeStatus{
-			Capacity: api.ResourceList{
+func makeNode(node string, milliCPU, memory int64) *v1.Node {
+	return &v1.Node{
+		ObjectMeta: v1.ObjectMeta{Name: node},
+		Status: v1.NodeStatus{
+			Capacity: v1.ResourceList{
 				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
 				"memory": *resource.NewQuantity(memory, resource.BinarySI),
 			},
-			Allocatable: api.ResourceList{
+			Allocatable: v1.ResourceList{
 				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
 				"memory": *resource.NewQuantity(memory, resource.BinarySI),
 			},
@@ -402,19 +402,19 @@ func makeNode(node string, milliCPU, memory int64) *api.Node {
 // - don't get the same score no matter what we schedule.
 func TestZeroRequest(t *testing.T) {
 	// A pod with no resources. We expect spreading to count it as having the default resources.
-	noResources := api.PodSpec{
-		Containers: []api.Container{
+	noResources := v1.PodSpec{
+		Containers: []v1.Container{
 			{},
 		},
 	}
 	noResources1 := noResources
 	noResources1.NodeName = "machine1"
 	// A pod with the same resources as a 0-request pod gets by default as its resources (for spreading).
-	small := api.PodSpec{
-		Containers: []api.Container{
+	small := v1.PodSpec{
+		Containers: []v1.Container{
 			{
-				Resources: api.ResourceRequirements{
-					Requests: api.ResourceList{
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{
 						"cpu": resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
 						"memory": resource.MustParse(
@@ -427,11 +427,11 @@ func TestZeroRequest(t *testing.T) {
 	small2 := small
 	small2.NodeName = "machine2"
 	// A larger pod.
-	large := api.PodSpec{
-		Containers: []api.Container{
+	large := v1.PodSpec{
+		Containers: []v1.Container{
 			{
-				Resources: api.ResourceRequirements{
-					Requests: api.ResourceList{
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{
 						"cpu": resource.MustParse(
 							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
 						"memory": resource.MustParse(
@@ -446,38 +446,38 @@ func TestZeroRequest(t *testing.T) {
 	large2 := large
 	large2.NodeName = "machine2"
 	tests := []struct {
-		pod   *api.Pod
-		pods  []*api.Pod
-		nodes []*api.Node
+		pod   *v1.Pod
+		pods  []*v1.Pod
+		nodes []*v1.Node
 		test  string
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
 		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine
 		// and when the zero-request pod is the one being scheduled.
 		{
-			pod:   &api.Pod{Spec: noResources},
-			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			pod:   &v1.Pod{Spec: noResources},
+			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test:  "test priority of zero-request pod with machine with zero-request pod",
-			pods: []*api.Pod{
+			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
 			},
 		},
 		{
-			pod:   &api.Pod{Spec: small},
-			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			pod:   &v1.Pod{Spec: small},
+			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test:  "test priority of nonzero-request pod with machine with zero-request pod",
-			pods: []*api.Pod{
+			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
 			},
 		},
 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 		{
-			pod:   &api.Pod{Spec: large},
-			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			pod:   &v1.Pod{Spec: large},
+			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test:  "test priority of larger pod with machine with zero-request pod",
-			pods: []*api.Pod{
+			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
 			},
@@ -494,8 +494,8 @@ func TestZeroRequest(t *testing.T) {
 		{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
 		{
 			Function: algorithmpriorities.NewSelectorSpreadPriority(
-				algorithm.FakeServiceLister([]*api.Service{}),
-				algorithm.FakeControllerLister([]*api.ReplicationController{}),
+				algorithm.FakeServiceLister([]*v1.Service{}),
+				algorithm.FakeControllerLister([]*v1.ReplicationController{}),
 				algorithm.FakeReplicaSetLister([]*extensions.ReplicaSet{})),
 			Weight: 1,
 		},