Merge pull request #109684 from yibozhuang/test-cleanup

cleanup: move scheduler unit tests to use PodWrapper

commit c8cdf08191
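
The change is mechanical throughout: verbose &v1.Pod{...} struct literals in scheduler unit tests are replaced with chained calls on the PodWrapper builder (imported as st from k8s.io/kubernetes/pkg/scheduler/testing), and test fixtures named machineN are renamed to nodeN. To make the hunks below easier to read, here is a minimal sketch of what a PodWrapper-style builder looks like. The method bodies are illustrative assumptions written for this note, not a copy of the real wrapper, which has many more methods.

    // Sketch of a PodWrapper-style builder (illustrative only; the real one
    // lives in k8s.io/kubernetes/pkg/scheduler/testing and is richer).
    package sketch

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        "k8s.io/apimachinery/pkg/types"
    )

    // PodWrapper wraps a v1.Pod so fixtures can be built with chained calls.
    type PodWrapper struct{ v1.Pod }

    // MakePod starts a new, empty pod fixture.
    func MakePod() *PodWrapper { return &PodWrapper{} }

    // Obj returns the pod that was built.
    func (p *PodWrapper) Obj() *v1.Pod { return &p.Pod }

    // Name, Namespace, and UID set the corresponding ObjectMeta fields
    // through the setters v1.Pod inherits from its embedded ObjectMeta.
    func (p *PodWrapper) Name(s string) *PodWrapper      { p.SetName(s); return p }
    func (p *PodWrapper) Namespace(s string) *PodWrapper { p.SetNamespace(s); return p }
    func (p *PodWrapper) UID(s string) *PodWrapper       { p.SetUID(types.UID(s)); return p }

    // Label adds a single label to the pod.
    func (p *PodWrapper) Label(k, v string) *PodWrapper {
        if p.Labels == nil {
            p.Labels = map[string]string{}
        }
        p.Labels[k] = v
        return p
    }

    // Container appends a container with only a name set.
    func (p *PodWrapper) Container(name string) *PodWrapper {
        p.Spec.Containers = append(p.Spec.Containers, v1.Container{Name: name})
        return p
    }

    // Req appends a container whose requests (and, in this sketch, matching
    // limits) are parsed from strings such as "0" or "100Mi".
    func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
        rl := v1.ResourceList{}
        for name, q := range resMap {
            rl[name] = resource.MustParse(q)
        }
        p.Spec.Containers = append(p.Spec.Containers, v1.Container{
            Resources: v1.ResourceRequirements{Requests: rl, Limits: rl},
        })
        return p
    }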
@@ -23,7 +23,6 @@ import (
     "time"

     v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -352,56 +351,23 @@ func TestIsInterested(t *testing.T) {
         {
             label: "Managed memory, empty resources",
             extender: mem,
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Name: "app",
-                        },
-                    },
-                },
-            },
-            want: false,
+            pod: st.MakePod().Container("app").Obj(),
+            want: false,
         },
         {
             label: "Managed memory, container memory",
             extender: mem,
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Name: "app",
-                            Resources: v1.ResourceRequirements{
-                                Requests: v1.ResourceList{"memory": resource.Quantity{}},
-                                Limits: v1.ResourceList{"memory": resource.Quantity{}},
-                            },
-                        },
-                    },
-                },
-            },
+            pod: st.MakePod().Req(map[v1.ResourceName]string{
+                "memory": "0",
+            }).Obj(),
             want: true,
         },
         {
             label: "Managed memory, init container memory",
             extender: mem,
-            pod: &v1.Pod{
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{
-                        {
-                            Name: "app",
-                        },
-                    },
-                    InitContainers: []v1.Container{
-                        {
-                            Name: "init",
-                            Resources: v1.ResourceRequirements{
-                                Requests: v1.ResourceList{"memory": resource.Quantity{}},
-                                Limits: v1.ResourceList{"memory": resource.Quantity{}},
-                            },
-                        },
-                    },
-                },
-            },
+            pod: st.MakePod().Container("app").InitReq(map[v1.ResourceName]string{
+                "memory": "0",
+            }).Obj(),
             want: true,
         },
     } {
@@ -424,15 +390,15 @@ func TestConvertToMetaVictims(t *testing.T) {
             nodeNameToVictims: map[string]*extenderv1.Victims{
                 "node1": {
                     Pods: []*v1.Pod{
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "uid1"}},
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod3", UID: "uid3"}},
+                        st.MakePod().Name("pod1").UID("uid1").Obj(),
+                        st.MakePod().Name("pod3").UID("uid3").Obj(),
                     },
                     NumPDBViolations: 1,
                 },
                 "node2": {
                     Pods: []*v1.Pod{
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "uid2"}},
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod4", UID: "uid4"}},
+                        st.MakePod().Name("pod2").UID("uid2").Obj(),
+                        st.MakePod().Name("pod4").UID("uid4").Obj(),
                     },
                     NumPDBViolations: 2,
                 },
@@ -496,24 +462,24 @@ func TestConvertToVictims(t *testing.T) {
             },
             nodeNames: []string{"node1", "node2"},
             podsInNodeList: []*v1.Pod{
-                {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "uid1"}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "uid2"}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "pod3", UID: "uid3"}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "pod4", UID: "uid4"}},
+                st.MakePod().Name("pod1").UID("uid1").Obj(),
+                st.MakePod().Name("pod2").UID("uid2").Obj(),
+                st.MakePod().Name("pod3").UID("uid3").Obj(),
+                st.MakePod().Name("pod4").UID("uid4").Obj(),
             },
             nodeInfos: nil,
             want: map[string]*extenderv1.Victims{
                 "node1": {
                     Pods: []*v1.Pod{
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "uid1"}},
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod3", UID: "uid3"}},
+                        st.MakePod().Name("pod1").UID("uid1").Obj(),
+                        st.MakePod().Name("pod3").UID("uid3").Obj(),
                     },
                     NumPDBViolations: 1,
                 },
                 "node2": {
                     Pods: []*v1.Pod{
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "uid2"}},
-                        {ObjectMeta: metav1.ObjectMeta{Name: "pod4", UID: "uid4"}},
+                        st.MakePod().Name("pod2").UID("uid2").Obj(),
+                        st.MakePod().Name("pod4").UID("uid4").Obj(),
                     },
                     NumPDBViolations: 2,
                 },
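
Each hunk above follows the same shape: a field-by-field struct literal collapses into a single builder chain that produces an equivalent object, which is why none of the expected results in these tests change. For example, the first victim pod is built both ways as:

    // before: explicit struct literal
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "uid1"}}

    // after: the same fixture via the wrapper
    pod = st.MakePod().Name("pod1").UID("uid1").Obj()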
@@ -28,12 +28,11 @@ import (
     "k8s.io/client-go/kubernetes/fake"
     clienttesting "k8s.io/client-go/testing"
     frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
+    st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

 func TestDefaultBinder(t *testing.T) {
-    testPod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
-    }
+    testPod := st.MakePod().Name("foo").Namespace("ns").Obj()
     testNode := "foohost.kubernetes.mydomain.com"
     tests := []struct {
         name string
@@ -26,6 +26,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

 func TestGetPodServices(t *testing.T) {
@@ -48,16 +49,11 @@ func TestGetPodServices(t *testing.T) {
     }
     var pods []*v1.Pod
     for i := 0; i < 5; i++ {
-        pod := &v1.Pod{
-            ObjectMeta: metav1.ObjectMeta{
-                Namespace: "test",
-                Name: fmt.Sprintf("test-pod-%d", i),
-                Labels: map[string]string{
-                    "app": fmt.Sprintf("test-%d", i),
-                    "label": fmt.Sprintf("label-%d", i),
-                },
-            },
-        }
+        pod := st.MakePod().Name(fmt.Sprintf("test-pod-%d", i)).
+            Namespace("test").
+            Label("app", fmt.Sprintf("test-%d", i)).
+            Label("label", fmt.Sprintf("label-%d", i)).
+            Obj()
         pods = append(pods, pod)
     }
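
Spelling out what the chain above builds for the first loop iteration (i == 0): a pod named test-pod-0 in namespace test carrying the labels app=test-0 and label=label-0, equivalent to the struct literal it replaces:

    pod := st.MakePod().Name("test-pod-0").
        Namespace("test").
        Label("app", "test-0").
        Label("label", "label-0").
        Obj()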
@@ -240,8 +240,8 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image: gcr.io/250:latest 250MB
             // Score: 100 * (250M/2 - 23M)/(1000M * 2 - 23M) = 5
             pod: &v1.Pod{Spec: test40250},
-            nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
+            nodes: []*v1.Node{makeImageNode("node1", node403002000), makeImageNode("node2", node25010)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 5}},
             name: "two images spread on two nodes, prefer the larger image one",
         },
         {
@@ -255,8 +255,8 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image: not present
             // Score: 0
             pod: &v1.Pod{Spec: test40300},
-            nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: 0}},
+            nodes: []*v1.Node{makeImageNode("node1", node403002000), makeImageNode("node2", node25010)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 7}, {Name: "node2", Score: 0}},
             name: "two images on one node, prefer this node",
         },
         {
@@ -270,8 +270,8 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image: gcr.io/10:latest 10MB
             // Score: 0 (10M/2 < 23M, min-threshold)
             pod: &v1.Pod{Spec: testMinMax},
-            nodes: []*v1.Node{makeImageNode("machine1", node400030), makeImageNode("machine2", node25010)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
+            nodes: []*v1.Node{makeImageNode("node1", node400030), makeImageNode("node2", node25010)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
             name: "if exceed limit, use limit",
         },
         {
@@ -289,8 +289,8 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image:
             // Score: 0
             pod: &v1.Pod{Spec: testMinMax},
-            nodes: []*v1.Node{makeImageNode("machine1", node400030), makeImageNode("machine2", node25010), makeImageNode("machine3", nodeWithNoImages)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 66}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            nodes: []*v1.Node{makeImageNode("node1", node400030), makeImageNode("node2", node25010), makeImageNode("node3", nodeWithNoImages)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 66}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
             name: "if exceed limit, use limit (with node which has no images present)",
         },
         {
@@ -308,9 +308,9 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image:
             // Score: 0
             pod: &v1.Pod{Spec: test300600900},
-            nodes: []*v1.Node{makeImageNode("machine1", node60040900), makeImageNode("machine2", node300600900), makeImageNode("machine3", nodeWithNoImages)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 32}, {Name: "machine2", Score: 36}, {Name: "machine3", Score: 0}},
-            name: "pod with multiple large images, machine2 is preferred",
+            nodes: []*v1.Node{makeImageNode("node1", node60040900), makeImageNode("node2", node300600900), makeImageNode("node3", nodeWithNoImages)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 36}, {Name: "node3", Score: 0}},
+            name: "pod with multiple large images, node2 is preferred",
         },
         {
             // Pod: gcr.io/30 gcr.io/40
@@ -323,8 +323,8 @@ func TestImageLocalityPriority(t *testing.T) {
             // Image: 100 * (30M - 23M) / (1000M * 2 - 23M) = 0
             // Score: 0
             pod: &v1.Pod{Spec: test3040},
-            nodes: []*v1.Node{makeImageNode("machine1", node203040), makeImageNode("machine2", node400030)},
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
+            nodes: []*v1.Node{makeImageNode("node1", node203040), makeImageNode("node2", node400030)},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 1}, {Name: "node2", Score: 0}},
             name: "pod with multiple small images",
         },
     }
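
The hunks in this file only rename fixtures from machineN to nodeN; the makeImageNode helper they call is untouched by this commit. For orientation, a plausible shape for that helper (an assumption for this note, not copied from the file) is:

    // makeImageNode presumably wraps a name and a v1.NodeStatus (whose Images
    // field lists the container images present on the node) into a v1.Node.
    func makeImageNode(name string, status v1.NodeStatus) *v1.Node {
        return &v1.Node{
            ObjectMeta: metav1.ObjectMeta{Name: name},
            Status:     status,
        }
    }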
[File diff suppressed because it is too large]
@@ -22,7 +22,7 @@ import (
     "strings"
     "testing"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -377,52 +377,52 @@ func TestPreferredAffinity(t *testing.T) {
         wantStatus *framework.Status
     }{
         {
-            name: "all machines are same priority as Affinity is nil",
+            name: "all nodes are same priority as Affinity is nil",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
         },
-        // the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
-        // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
-        // the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score
+        // the node(node1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
+        // the node(node3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
+        // the node(node2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score
         {
             name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
                 "which doesn't match either pods in nodes or in topology key",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS1InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
         },
-        // the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
-        // the node2(machine2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1
-        // the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector,
+        // the node1(node1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
+        // the node2(node2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1
+        // the node3(node3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector,
         // get a low score.
         {
             name: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS1InRegion}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChinaAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
-        // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
+        // there are 2 regions, say regionChina(node1,node3,node4) and regionIndia(node2,node5), both regions have nodes that match the preference.
         // But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia.
         // Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score),
         // while all the nodes in regionIndia should get another same score(low score).
@@ -430,37 +430,37 @@ func TestPreferredAffinity(t *testing.T) {
             name: "Affinity: nodes in one region has more matching pods comparing to other region, so the region which has more matches will get high score",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: framework.MaxNodeScore}, {Name: "node5", Score: 0}},
         },
         // Test with the different operators and values for pod affinity scheduling preference, including some match failures.
         {
             name: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: affinity3}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 20}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 20}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
         // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
         // but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
@@ -468,57 +468,57 @@ func TestPreferredAffinity(t *testing.T) {
             name: "Affinity symmetry: considered only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: stayWithS1InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: stayWithS1InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
         {
             name: "Affinity symmetry with namespace selector",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: affinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: affinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
         },
         {
             name: "AntiAffinity symmetry with namespace selector",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: antiAffinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: antiAffinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: stayWithS2InRegion}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: framework.MaxNodeScore}},
         },
         {
             name: "Affinity symmetry: considered RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardAffinity}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardAffinity}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardAffinity}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardAffinity}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },

         // The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity.
@@ -531,94 +531,94 @@ func TestPreferredAffinity(t *testing.T) {
             name: "Anti Affinity: pod that does not match existing pods in node will get high score ",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
         {
             name: "Anti Affinity: pod that does not match topology key & match the pods in nodes will get higher score comparing to others ",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
         {
             name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmatches will get high score",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
         // Test the symmetry cases for anti affinity
         {
             name: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: awayFromS2InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: awayFromS2InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelAzAz2}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
         // Test both affinity and anti-affinity
         {
             name: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS1InRegionAwayFromS2InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelAzAz1}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
         },
         // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
         // the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level,
         // so that all the pods of a RC/service can stay in a same region but trying to separate with each other
-        // machine-1,machine-3,machine-4 are in ChinaRegion others machine-2,machine-5 are in IndiaRegion
+        // node-1,node-3,node-4 are in ChinaRegion others node-2,node-5 are in IndiaRegion
         {
             name: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS1InRegionAwayFromS2InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChinaAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: framework.MaxNodeScore}, {Name: "node5", Score: 0}},
         },
         // Consider Affinity, Anti Affinity and symmetry together.
         // for Affinity, the weights are: 8, 0, 0, 0
@@ -629,18 +629,18 @@ func TestPreferredAffinity(t *testing.T) {
             name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: stayWithS1InRegionAwayFromS2InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
-                {Spec: v1.PodSpec{NodeName: "machine3", Affinity: stayWithS1InRegionAwayFromS2InAz}},
-                {Spec: v1.PodSpec{NodeName: "machine4", Affinity: awayFromS1InAz}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+                {Spec: v1.PodSpec{NodeName: "node3", Affinity: stayWithS1InRegionAwayFromS2InAz}},
+                {Spec: v1.PodSpec{NodeName: "node4", Affinity: awayFromS1InAz}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: labelAzAz2}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: framework.MaxNodeScore}, {Name: "node4", Score: 0}},
         },
         // Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
         // 1. Some nodes in a topology don't have pods with affinity, but other nodes in the same topology have.
@@ -649,22 +649,22 @@ func TestPreferredAffinity(t *testing.T) {
             name: "Avoid panic when partial nodes in a topology don't have pods with affinity",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS1InRegionAwayFromS2InAz}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: stayWithS1InRegionAwayFromS2InAz}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
         },
         {
             name: "invalid Affinity fails PreScore",
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: invalidAffinityLabels}},
             wantStatus: framework.NewStatus(framework.Error, `Invalid value: "{{.bad-value.}}"`),
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
             },
         },
         {
@@ -672,69 +672,69 @@ func TestPreferredAffinity(t *testing.T) {
             pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: invalidAntiAffinityLabels}},
             wantStatus: framework.NewStatus(framework.Error, `Invalid value: "{{.bad-value.}}"`),
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgChina}},
             },
         },
         {
             name: "Affinity with pods matching NamespaceSelector",
             pod: &v1.Pod{Spec: v1.PodSpec{Affinity: affinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team2", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team2", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
         },
         {
             name: "Affinity with pods matching both NamespaceSelector and Namespaces fields",
             pod: &v1.Pod{Spec: v1.PodSpec{Affinity: affinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team2", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team2", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
         },
         {
             name: "Affinity with pods matching NamespaceSelector",
             pod: &v1.Pod{Spec: v1.PodSpec{Affinity: antiAffinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team2", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team2", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
         {
             name: "Affinity with pods matching both NamespaceSelector and Namespaces fields",
             pod: &v1.Pod{Spec: v1.PodSpec{Affinity: antiAffinityNamespaceSelector}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team2", Labels: podLabelSecurityS1}},
-                {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node1"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team2", Labels: podLabelSecurityS1}},
+                {Spec: v1.PodSpec{NodeName: "node2"}, ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team1", Labels: podLabelSecurityS1}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
             },
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: framework.MaxNodeScore}},
         },
     }
     for _, test := range tests {
@@ -826,76 +826,76 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
             name: "with default weight",
             pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardPodAffinity}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardPodAffinity}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
             hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
         {
             name: "with zero weight",
             pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardPodAffinity}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardPodAffinity}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
             hardPodAffinityWeight: 0,
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
         },
         {
             name: "with no matching namespace",
             pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team2", Labels: podLabelServiceS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardPodAffinity}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardPodAffinity}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
             hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
         },
         {
             name: "with matching NamespaceSelector",
             pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "subteam1.team1", Labels: podLabelServiceS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardPodAffinity}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardPodAffinity}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
             hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
         {
             name: "with matching Namespaces",
             pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "subteam2.team2", Labels: podLabelServiceS1}},
             pods: []*v1.Pod{
-                {Spec: v1.PodSpec{NodeName: "machine1", Affinity: hardPodAffinity}},
-                {Spec: v1.PodSpec{NodeName: "machine2", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node1", Affinity: hardPodAffinity}},
+                {Spec: v1.PodSpec{NodeName: "node2", Affinity: hardPodAffinity}},
             },
             nodes: []*v1.Node{
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
-                {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labelRgChina}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: labelRgIndia}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: labelAzAz1}},
             },
             hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-            expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
+            expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}, {Name: "node3", Score: 0}},
         },
     }
     for _, test := range tests {
|
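The hunk above only renames test fixtures from machines to nodes; elsewhere the commit swaps such raw v1.Pod literals for the scheduler's testing wrappers. A minimal sketch of the two styles, assuming only the wrapper methods that appear in this diff (MakePod, Name, Labels, Obj); the file and package names are illustrative, not part of the commit:

// sketch.go — hedged illustration, not part of this commit.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// literalPod builds a fixture the pre-refactor way: a raw v1.Pod literal.
func literalPod(labels map[string]string) *v1.Pod {
	return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: labels}}
}

// wrappedPod builds the same fixture with the PodWrapper chain this
// commit migrates the scheduler unit tests to.
func wrappedPod(labels map[string]string) *v1.Pod {
	return st.MakePod().Name("pod1").Labels(labels).Obj()
}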
@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@ -49,37 +50,25 @@ func TestNodeAffinity(t *testing.T) {
},
{
name: "missing labels",
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
pod: st.MakePod().NodeSelector(map[string]string{
"foo": "bar",
}).Obj(),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPod),
},
{
name: "same labels",
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
pod: st.MakePod().NodeSelector(map[string]string{
"foo": "bar",
}).Obj(),
labels: map[string]string{
"foo": "bar",
},
},
{
name: "node labels are superset",
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
},
},
},
pod: st.MakePod().NodeSelector(map[string]string{
"foo": "bar",
}).Obj(),
labels: map[string]string{
"foo": "bar",
"baz": "blah",
@ -87,14 +76,10 @@ func TestNodeAffinity(t *testing.T) {
},
{
name: "node labels are subset",
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeSelector: map[string]string{
"foo": "bar",
"baz": "blah",
},
},
},
pod: st.MakePod().NodeSelector(map[string]string{
"foo": "bar",
"baz": "blah",
}).Obj(),
labels: map[string]string{
"foo": "bar",
},
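As a side note on the conversion above: the map handed to NodeSelector appears to land directly in pod.Spec.NodeSelector, which is why the literal and the chained form are interchangeable here. A sketch under that assumption:

// sketch of the NodeSelector wrapper; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// selectorPod mirrors the "missing labels" fixture above: the map passed
// to NodeSelector becomes the pod's spec.nodeSelector.
func selectorPod() *v1.Pod {
	return st.MakePod().NodeSelector(map[string]string{"foo": "bar"}).Obj()
}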
@ -1029,69 +1014,69 @@ func TestNodeAffinityPriority(t *testing.T) {
disablePreScore bool
}{
{
name: "all machines are same priority as NodeAffinity is nil",
name: "all nodes are same priority as NodeAffinity is nil",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
{
name: "no machine matches preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
name: "no node matches preferred scheduling requirements in NodeAffinity of pod so all nodes' priority is zero",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: affinity1,
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label4}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label4}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
{
name: "only machine1 matches the preferred scheduling requirements of pod",
name: "only node1 matches the preferred scheduling requirements of pod",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: affinity1,
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}, {Name: "node3", Score: 0}},
},
{
name: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
name: "all nodes matches the preferred scheduling requirements of pod but with different priorities ",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: affinity2,
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 18}, {Name: "machine5", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 36}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: framework.MaxNodeScore}, {Name: "node2", Score: 36}},
},
{
name: "added affinity",
pod: &v1.Pod{},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
args: config.NodeAffinityArgs{
AddedAffinity: affinity1.NodeAffinity,
},
@ -1104,11 +1089,11 @@ func TestNodeAffinityPriority(t *testing.T) {
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: label5}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 60}, {Name: "machine3", Score: framework.MaxNodeScore}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 40}, {Name: "node2", Score: 60}, {Name: "node3", Score: framework.MaxNodeScore}},
args: config.NodeAffinityArgs{
AddedAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
@ -1136,11 +1121,11 @@ func TestNodeAffinityPriority(t *testing.T) {
},
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: label1}},
{ObjectMeta: metav1.ObjectMeta{Name: "node5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 18}, {Name: "machine5", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 36}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 18}, {Name: "node5", Score: framework.MaxNodeScore}, {Name: "node2", Score: 36}},
disablePreScore: true,
},
}
@ -22,8 +22,8 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/framework"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestNodeName(t *testing.T) {
@ -39,29 +39,13 @@ func TestNodeName(t *testing.T) {
name: "no host specified",
},
{
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeName: "foo",
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
},
pod: st.MakePod().Node("foo").Obj(),
node: st.MakeNode().Name("foo").Obj(),
name: "host matches",
},
{
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeName: "bar",
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
},
pod: st.MakePod().Node("bar").Obj(),
node: st.MakeNode().Name("foo").Obj(),
name: "host doesn't match",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
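For the NodeName cases above, note the two different builders: MakePod().Node(...) fills pod.Spec.NodeName, while MakeNode().Name(...) sets the node object's name, and the plugin passes only when the two strings match. A sketch under that assumption:

// sketch of the NodeName fixture pair; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// mismatchedFixtures reproduces the "host doesn't match" case: the pod
// asks for node "bar" but the candidate node is named "foo".
func mismatchedFixtures() (*v1.Pod, *v1.Node) {
	pod := st.MakePod().Node("bar").Obj()   // pod.Spec.NodeName = "bar"
	node := st.MakeNode().Name("foo").Obj() // node.Name = "foo"
	return pod, node
}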
@ -27,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/kubernetes/pkg/scheduler/framework"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func newPod(host string, hostPortInfos ...string) *v1.Pod {
@ -41,16 +42,7 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod {
Protocol: v1.Protocol(splited[0]),
})
}
return &v1.Pod{
Spec: v1.PodSpec{
NodeName: host,
Containers: []v1.Container{
{
Ports: networkPorts,
},
},
},
}
return st.MakePod().Node(host).ContainerPort(networkPorts).Obj()
}

func TestNodePorts(t *testing.T) {
@ -184,66 +176,42 @@ func TestGetContainerPorts(t *testing.T) {
expected []*v1.ContainerPort
}{
{
pod1: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
pod2: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
pod1: st.MakePod().ContainerPort([]v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
}}).ContainerPort([]v1.ContainerPort{
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
}).Obj(),
pod2: st.MakePod().ContainerPort([]v1.ContainerPort{
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
}}).ContainerPort([]v1.ContainerPort{
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
}).Obj(),
expected: []*v1.ContainerPort{
{
ContainerPort: 8001,
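Reading the conversion above, each ContainerPort call appears to append one container carrying the given port list, which is how the two-container fixtures survive the rewrite. A hedged sketch of that pattern:

// sketch of the ContainerPort wrapper; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// twoContainerPod chains ContainerPort twice, yielding a pod whose two
// containers expose ports 8001/8002 and 8003/8004 respectively.
func twoContainerPod() *v1.Pod {
	return st.MakePod().
		ContainerPort([]v1.ContainerPort{
			{ContainerPort: 8001, Protocol: v1.ProtocolTCP},
			{ContainerPort: 8002, Protocol: v1.ProtocolTCP},
		}).
		ContainerPort([]v1.ContainerPort{
			{ContainerPort: 8003, Protocol: v1.ProtocolTCP},
			{ContainerPort: 8004, Protocol: v1.ProtocolTCP},
		}).
		Obj()
}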
@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestNodeResourcesBalancedAllocation(t *testing.T) {
@ -52,7 +53,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
},
},
},
NodeName: "machine1",
NodeName: "node1",
}
labels1 := map[string]string{
"foo": "bar",
@ -62,17 +63,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
NodeName: "node1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
@ -93,9 +85,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuOnly2.NodeName = "node2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
NodeName: "node2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
@ -115,13 +107,6 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
},
},
}
nonZeroContainer := v1.PodSpec{
Containers: []v1.Container{{}},
}
nonZeroContainer1 := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{{}},
}

defaultResourceBalancedAllocationSet := []config.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
@ -148,9 +133,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// CPU Fraction: 0 / 4000 = 0 %
// Memory Fraction: 0 / 10000 = 0%
// Node2 Score: (1-0) * MaxNodeScore = MaxNodeScore
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000, nil), makeNode("machine2", 4000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000, nil), makeNode("node2", 4000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
},
@ -166,9 +151,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 std: 0
// Node2 Score: (1-0) * MaxNodeScore = MaxNodeScore
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000, nil), makeNode("machine2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 87}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized machines",
nodes: []*v1.Node{makeNode("node1", 4000, 10000, nil), makeNode("node2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 87}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized nodes",
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
},
{
@ -182,15 +167,15 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Memory Fraction: 0 / 10000 = 0%
// Node2 std: 0
// Node2 Score: (1-0) * MaxNodeScore = MaxNodeScore
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000, nil), makeNode("machine2", 4000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000, nil), makeNode("node2", 4000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods without container scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
st.MakePod().Node("node1").Labels(labels2).Obj(),
st.MakePod().Node("node1").Labels(labels1).Obj(),
st.MakePod().Node("node2").Labels(labels1).Obj(),
st.MakePod().Node("node2").Labels(labels1).Obj(),
},
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
},
@ -205,13 +190,13 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Memory Fraction: 0 / 1000 = 0%
// Node2 std: (0 - 0) / 2 = 0
// Node2 Score: (1 - 0)*MaxNodeScore = 100
pod: &v1.Pod{Spec: nonZeroContainer},
nodes: []*v1.Node{makeNode("machine1", 250, 1000*1024*1024, nil), makeNode("machine2", 250, 1000*1024*1024, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 100}, {Name: "machine2", Score: 100}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 250, 1000*1024*1024, nil), makeNode("node2", 250, 1000*1024*1024, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}},
name: "no resources requested, pods with container scheduled",
pods: []*v1.Pod{
{Spec: nonZeroContainer1},
{Spec: nonZeroContainer1},
st.MakePod().Node("node1").Obj(),
st.MakePod().Node("node1").Obj(),
},
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
},
@ -226,9 +211,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Memory Fraction: 5000 / 20000 = 25%
// Node2 std: (0.6 - 0.25) / 2 = 0.175
// Node2 Score: (1 - 0.175)*MaxNodeScore = 82
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000, nil), makeNode("machine2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 82}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 82}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@ -250,8 +235,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 std: (0.6 - 0.5) / 2 = 0.05
// Node2 Score: (1 - 0.05)*MaxNodeScore = 95
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000, nil), makeNode("machine2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 82}, {Name: "machine2", Score: 95}},
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 20000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -271,9 +256,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 std: (0.6 - 0.2) / 2 = 0.2
// Node2 Score: (1 - 0.2)*MaxNodeScore = 80
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000, nil), makeNode("machine2", 10000, 50000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 82}, {Name: "machine2", Score: 80}},
name: "resources requested, pods scheduled with resources, differently sized machines",
nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 50000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 80}},
name: "resources requested, pods scheduled with resources, differently sized nodes",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
@ -293,8 +278,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 std: (1 - 0.5) / 2 = 0.25
// Node2 Score: (1 - 0.25)*MaxNodeScore = 75
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 6000, 10000, nil), makeNode("machine2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 75}},
nodes: []*v1.Node{makeNode("node1", 6000, 10000, nil), makeNode("node2", 6000, 10000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 75}},
name: "requested resources at node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -303,9 +288,9 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0, nil), makeNode("machine2", 0, 0, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 100}, {Name: "machine2", Score: 100}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 0, 0, nil), makeNode("node2", 0, 0, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
@ -326,22 +311,12 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Node2 std: sqrt(((0.8571 - 0.378) * (0.8571 - 0.378) + (0.378 - 0.125) * (0.378 - 0.125)) + (0.378 - 0.125) * (0.378 - 0.125)) / 3) = 0.345
// Node2 Score: (1 - 0.358)*MaxNodeScore = 65
{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("0"),
"nvidia.com/gpu": resource.MustParse("1"),
},
},
},
},
},
},
nodes: []*v1.Node{makeNode("machine1", 3500, 40000, scalarResource), makeNode("machine2", 3500, 40000, scalarResource)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 65}},
pod: st.MakePod().Req(map[v1.ResourceName]string{
v1.ResourceMemory: "0",
"nvidia.com/gpu": "1",
}).Obj(),
nodes: []*v1.Node{makeNode("node1", 3500, 40000, scalarResource), makeNode("node2", 3500, 40000, scalarResource)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 65}},
name: "include scalar resource on a node for balanced resource allocation",
pods: []*v1.Pod{
{Spec: cpuAndMemory},
@ -353,13 +328,13 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
{Name: "nvidia.com/gpu", Weight: 1},
}},
},
// Only one node (machine1) has the scalar resource, pod doesn't request the scalar resource and the scalar resource should be skipped for consideration.
// Only one node (node1) has the scalar resource, pod doesn't request the scalar resource and the scalar resource should be skipped for consideration.
// Node1: std = 0, score = 100
// Node2: std = 0, score = 100
{
pod: &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{}}}},
nodes: []*v1.Node{makeNode("machine1", 3500, 40000, scalarResource), makeNode("machine2", 3500, 40000, nil)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 100}, {Name: "machine2", Score: 100}},
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("node1", 3500, 40000, scalarResource), makeNode("node2", 3500, 40000, nil)},
expectedList: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}},
name: "node without the scalar resource results to a higher score",
pods: []*v1.Pod{
{Spec: cpuOnly},
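The fixtures above lean on Req, which takes resource quantities as strings keyed by resource name; chaining it twice produces a two-container pod, matching the old two-container literals. A minimal sketch, assuming Req parses the strings with resource.MustParse semantics:

// sketch of the Req wrapper; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// requestedPod mirrors the requestedPod fixtures above: two containers,
// requesting 1000+2000 CPU and 2000+3000 memory in total.
func requestedPod() *v1.Pod {
	return st.MakePod().
		Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
		Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
		Obj()
}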
@ -81,7 +81,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
// CPU Score: ((6000 - 3000) * MaxNodeScore) / 6000 = 50
// Memory Score: ((10000 - 5000) * MaxNodeScore) / 10000 = 50
// Node2 Score: (50 + 50) / 2 = 50
name: "nothing scheduled, resources requested, differently sized machines",
name: "nothing scheduled, resources requested, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -95,7 +95,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
resources: defaultResources,
},
{
name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
name: "Resources not set, nothing scheduled, resources requested, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -190,7 +190,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
// CPU Score: ((10000 - 6000) * MaxNodeScore) / 10000 = 40
// Memory Score: ((50000 - 10000) * MaxNodeScore) / 50000 = 80
// Node2 Score: (40 + 80) / 2 = 60
name: "resources requested, pods scheduled with resources, differently sized machines",
name: "resources requested, pods scheduled with resources, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -249,7 +249,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
// CPU Score: ((6000 - 3000) *100) / 6000 = 50
// Memory Score: ((10000 - 5000) *100) / 10000 = 50
// Node2 Score: (50 * 1 + 50 * 2) / (1 + 2) = 50
name: "nothing scheduled, resources requested with different weight on CPU and memory, differently sized machines",
name: "nothing scheduled, resources requested with different weight on CPU and memory, differently sized nodes",
requestedPod: st.MakePod().Node("node1").
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -80,7 +80,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
// CPU Score: (3000 * MaxNodeScore) / 6000 = 50
// Memory Score: (5000 * MaxNodeScore) / 10000 = 50
// Node2 Score: (50 + 50) / 2 = 50
name: "nothing scheduled, resources requested, differently sized machines",
name: "nothing scheduled, resources requested, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -94,7 +94,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
resources: defaultResources,
},
{
name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
name: "Resources not set, nothing scheduled, resources requested, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -185,7 +185,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
// CPU Score: (3000 *100) / 6000 = 50
// Memory Score: (5000 *100) / 10000 = 50
// Node2 Score: (50 * 1 + 50 * 2) / (1 + 2) = 50
name: "nothing scheduled, resources requested, differently sized machines",
name: "nothing scheduled, resources requested, differently sized nodes",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -24,7 +24,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -73,7 +72,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
shape: shape,
},
{
name: "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
name: "nothing scheduled, resources requested, differently sized nodes (default - least requested nodes have priority)",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
@ -213,38 +212,19 @@ func TestBrokenLinearFunction(t *testing.T) {
}

func TestResourceBinPackingSingleExtended(t *testing.T) {
extendedResource := "intel.com/foo"
extendedResource1 := map[string]int64{
"intel.com/foo": 4,
}
extendedResource2 := map[string]int64{
"intel.com/foo": 8,
}
extendedResource3 := map[v1.ResourceName]string{
"intel.com/foo": "2",
}
extendedResource4 := map[v1.ResourceName]string{
"intel.com/foo": "4",
}

extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("4"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
@ -255,8 +235,8 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
{
// Node1 Score = Node2 Score = 0 as the incoming Pod doesn't request extended resource.
pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResource2), makeNode("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
@ -271,13 +251,11 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// resourceScoringFunction((0+2),4)
// = 2/4 * maxUtilization = 50 = rawScoringFunction(50)
// Node2 Score: 5
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResource2), makeNode("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
pod: st.MakePod().Req(extendedResource3).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
},
pods: []*v1.Pod{st.MakePod().Obj()},
},
{
// Node1 scores (used resources) on 0-MaxNodeScore scale
@ -291,13 +269,11 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// resourceScoringFunction((2+2),4)
// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResource2), makeNode("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
pod: st.MakePod().Req(extendedResource3).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 2}, {Name: "node2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
pods: []*v1.Pod{st.MakePod().Req(extendedResource3).Node("node2").Obj()},
},
{
// Node1 scores (used resources) on 0-MaxNodeScore scale
@ -311,9 +287,9 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
// resourceScoringFunction((0+4),4)
// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResource2), makeNode("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
pod: st.MakePod().Req(extendedResource4).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResource2), makeNode("node2", 4000, 10000*1024*1024, extendedResource1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
@ -362,44 +338,24 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
}

func TestResourceBinPackingMultipleExtended(t *testing.T) {
extendedResource1 := "intel.com/foo"
extendedResource2 := "intel.com/bar"
extendedResources1 := map[string]int64{
"intel.com/foo": 4,
"intel.com/bar": 8,
}

extendedResources2 := map[string]int64{
"intel.com/foo": 8,
"intel.com/bar": 4,
}

extnededResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("2"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
extendedResourcePod1 := map[v1.ResourceName]string{
"intel.com/foo": "2",
"intel.com/bar": "2",
}
extnededResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("4"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
extendedResourcePod2 := map[v1.ResourceName]string{
"intel.com/foo": "4",
"intel.com/bar": "2",
}
machine2Pod := extnededResourcePod1
machine2Pod.NodeName = "machine2"

tests := []struct {
pod *v1.Pod
pods []*v1.Pod
@ -436,11 +392,10 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
// Node2 Score: (0 * 3) + (0 * 5) / 8 = 0

pod: st.MakePod().Obj(),
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResources2), makeNode("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "nothing scheduled, nothing requested",
},

{

// resources["intel.com/foo"] = 3
@ -469,15 +424,14 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
// = 2/8 * 100 = 25 = rawScoringFunction(25)
// Node2 Score: (5 * 3) + (2 * 5) / 8 = 3

pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResources2), makeNode("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
pod: st.MakePod().Req(extendedResourcePod1).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
},
},

{

// resources["intel.com/foo"] = 3
@ -505,15 +459,12 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
// = 4/8 *100 = 50 = rawScoringFunction(50)
// Node2 Score: (10 * 3) + (5 * 5) / 8 = 7

pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResources2), makeNode("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
pod: st.MakePod().Req(extendedResourcePod1).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
pods: []*v1.Pod{st.MakePod().Req(extendedResourcePod1).Node("node2").Obj()},
},

{

// resources["intel.com/foo"] = 3
@ -556,9 +507,9 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
// = 2/8 * 100 = 25 = rawScoringFunction(25)
// Node2 Score: (10 * 3) + (2 * 5) / 8 = 5

pod: &v1.Pod{Spec: extnededResourcePod2},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000*1024*1024, extendedResources2), makeNode("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
pod: st.MakePod().Req(extendedResourcePod2).Obj(),
nodes: []*v1.Node{makeNode("node1", 4000, 10000*1024*1024, extendedResources2), makeNode("node2", 4000, 10000*1024*1024, extendedResources1)},
expectedScores: []framework.NodeScore{{Name: "node1", Score: 5}, {Name: "node2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
st.MakePod().Obj(),
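Extended resources go through the same Req map, keyed by the fully qualified resource name, which is what lets the commit drop the hand-rolled PodSpecs above. A sketch under that assumption:

// sketch of Req with extended resources; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// extendedResourcePod requests the bin-packing tests' extended resources;
// the resource names are taken from the fixtures above.
func extendedResourcePod() *v1.Pod {
	return st.MakePod().Req(map[v1.ResourceName]string{
		"intel.com/foo": "2",
		"intel.com/bar": "2",
	}).Obj()
}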
@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
st "k8s.io/kubernetes/pkg/scheduler/testing"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
utilpointer "k8s.io/utils/pointer"
)
@ -68,195 +69,35 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
}

func TestCSILimits(t *testing.T) {
runningPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs.csi.aws.com-3",
},
},
},
},
},
}

pendingVolumePod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-4",
},
},
},
},
},
}
runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
pendingVolumePod := st.MakePod().PVC("csi-4").Obj()

// Different pod than pendingVolumePod, but using the same unbound PVC
unboundPVCPod2 := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-4",
},
},
},
},
},
}
unboundPVCPod2 := st.MakePod().PVC("csi-4").Obj()

missingPVPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-6",
},
},
},
},
},
}
missingPVPod := st.MakePod().PVC("csi-6").Obj()
noSCPVCPod := st.MakePod().PVC("csi-5").Obj()

gceTwoVolPod := st.MakePod().PVC("csi-pd.csi.storage.gke.io-1").PVC("csi-pd.csi.storage.gke.io-2").Obj()

noSCPVCPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-5",
},
},
},
},
},
}
gceTwoVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-pd.csi.storage.gke.io-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-pd.csi.storage.gke.io-2",
},
},
},
},
},
}
// In-tree volumes
inTreeOneVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-kubernetes.io/aws-ebs-0",
},
},
},
},
},
}
inTreeTwoVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-kubernetes.io/aws-ebs-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-kubernetes.io/aws-ebs-2",
},
},
},
},
},
}
// pods with matching csi driver names
csiEBSOneVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs.csi.aws.com-0",
},
},
},
},
},
}
csiEBSTwoVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs.csi.aws.com-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs.csi.aws.com-2",
},
},
},
},
},
}
inTreeNonMigratableOneVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-kubernetes.io/hostpath-0",
},
},
},
},
},
}
inTreeOneVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-0").Obj()
inTreeTwoVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-1").PVC("csi-kubernetes.io/aws-ebs-2").Obj()

ephemeralVolumePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test",
Name: "abc",
UID: "12345",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "xyz",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
// pods with matching csi driver names
csiEBSOneVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-0").Obj()
csiEBSTwoVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-1").PVC("csi-ebs.csi.aws.com-2").Obj()

inTreeNonMigratableOneVolPod := st.MakePod().PVC("csi-kubernetes.io/hostpath-0").Obj()

ephemeralVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(
v1.Volume{
Name: "xyz",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
}
}).Obj()

controller := true
ephemeralClaim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@ -278,29 +119,18 @@ func TestCSILimits(t *testing.T) {
conflictingClaim := ephemeralClaim.DeepCopy()
conflictingClaim.OwnerReferences = nil

ephemeralTwoVolumePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test",
Name: "abc",
UID: "12345II",
ephemeralTwoVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345II").Volume(v1.Volume{
Name: "x",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "x",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
{
Name: "y",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
},
}).Volume(v1.Volume{
Name: "y",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
}
}).Obj()

ephemeralClaimX := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: ephemeralTwoVolumePod.Namespace,
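One more wrapper pair shows up above: PVC(name) appends a volume backed by a PersistentVolumeClaim source with that claim name, while Volume appends an arbitrary v1.Volume (used here for generic ephemeral volumes). A hedged sketch of both:

// sketch of the PVC and Volume wrappers; package name is illustrative.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// pvcPod references a named claim, like the CSI fixtures above.
func pvcPod() *v1.Pod {
	return st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
}

// ephemeralPod mirrors ephemeralVolumePod: name/namespace/UID plus one
// generic ephemeral volume.
func ephemeralPod() *v1.Pod {
	return st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(v1.Volume{
		Name:         "xyz",
		VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
	}).Obj()
}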
@ -18,6 +18,7 @@ package nodevolumelimits

import (
"context"
"fmt"
"os"
"reflect"
"strings"
@ -29,32 +30,67 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
st "k8s.io/kubernetes/pkg/scheduler/testing"
utilpointer "k8s.io/utils/pointer"
)

var (
oneVolPod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
},
}).Obj()
twoVolPod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
},
}).Obj()
splitVolsPod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
},
}).Obj()
nonApplicablePod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Obj()

deletedPVCPod = st.MakePod().PVC("deletedPVC").Obj()
twoDeletedPVCPod = st.MakePod().PVC("deletedPVC").PVC("anotherDeletedPVC").Obj()
deletedPVPod = st.MakePod().PVC("pvcWithDeletedPV").Obj()
// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
deletedPVPod2 = st.MakePod().PVC("pvcWithDeletedPV").Obj()
anotherDeletedPVPod = st.MakePod().PVC("anotherPVCWithDeletedPV").Obj()
emptyPod = st.MakePod().Obj()
unboundPVCPod = st.MakePod().PVC("unboundPVC").Obj()
// Different pod than unboundPVCPod, but using the same unbound PVC
unboundPVCPod2 = st.MakePod().PVC("unboundPVC").Obj()
// pod with unbound PVC that's different to unboundPVC
anotherUnboundPVCPod = st.MakePod().PVC("anotherUnboundPVC").Obj()
)

func TestEphemeralLimits(t *testing.T) {
// We have to specify a valid filter and arbitrarily pick GCE PD here.
// It doesn't matter for the test cases.
|
||||
filterName := gcePDVolumeFilterType
|
||||
driverName := csilibplugins.GCEPDInTreePluginName
|
||||
|
||||
ephemeralVolumePod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Name: "abc",
|
||||
UID: "12345",
|
||||
ephemeralVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(v1.Volume{
|
||||
Name: "xyz",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Ephemeral: &v1.EphemeralVolumeSource{},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "xyz",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Ephemeral: &v1.EphemeralVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}).Obj()
|
||||
|
||||
controller := true
|
||||
ephemeralClaim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -130,180 +166,6 @@ func TestEphemeralLimits(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAzureDiskLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
@ -468,205 +330,8 @@ func TestAzureDiskLimits(t *testing.T) {
}

func TestEBSLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	unboundPVCwithInvalidSCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVCwithInvalidSCPod",
						},
					},
				},
			},
		},
	}
	unboundPVCwithDefaultSCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVCwithDefaultSCPod",
						},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}
	unboundPVCWithInvalidSCPod := st.MakePod().PVC("unboundPVCWithInvalidSCPod").Obj()
	unboundPVCWithDefaultSCPod := st.MakePod().PVC("unboundPVCWithDefaultSCPod").Obj()

	tests := []struct {
		newPod *v1.Pod
@ -777,7 +442,7 @@ func TestEBSLimits(t *testing.T) {
			test: "two missing PVCs are not counted towards the PV limit twice",
		},
		{
			newPod:       unboundPVCwithInvalidSCPod,
			newPod:       unboundPVCWithInvalidSCPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   ebsVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
@ -785,14 +450,13 @@ func TestEBSLimits(t *testing.T) {
			test: "unbound PVC with invalid SC is not counted towards the PV limit",
		},
		{
			newPod:       unboundPVCwithDefaultSCPod,
			newPod:       unboundPVCWithDefaultSCPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   ebsVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      1,
			test:         "unbound PVC from different provisioner is not counted towards the PV limit",
		},

		{
			newPod:       onePVCPod(ebsVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
@ -876,180 +540,6 @@ func TestEBSLimits(t *testing.T) {
}

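With the per-test struct literals above deleted, the remaining TestEBSLimits cases reference the shared package-level fixtures directly, e.g. existingPods: []*v1.Pod{oneVolPod, deletedPVPod}. A sketch of how one table entry reads after the cleanup (the maxVols value and test name below are illustrative, not taken from the diff):

	{
		newPod:       twoVolPod,            // shared fixture from the var block
		existingPods: []*v1.Pod{oneVolPod}, // shared fixture from the var block
		filterName:   ebsVolumeFilterType,
		driverName:   csilibplugins.AWSEBSInTreePluginName,
		maxVols:      2, // illustrative only
		test:         "illustrative example, not a case from this commit",
	},
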
func TestGCEPDLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
@ -1299,14 +789,14 @@ func getFakePVCLister(filterName string) fakeframework.PersistentVolumeClaimLister {
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithDefaultSCPod"},
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithDefaultSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("standard-sc"),
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithInvalidSCPod"},
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithInvalidSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("invalid-sc"),
			},
@ -1335,40 +825,9 @@ func getFakePVLister(filterName string) fakeframework.PersistentVolumeLister {
}

func onePVCPod(filterName string) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "some" + filterName + "Vol",
						},
					},
				},
			},
		},
	}
	return st.MakePod().PVC(fmt.Sprintf("some%sVol", filterName)).Obj()
}

func splitPVCPod(filterName string) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "someNon" + filterName + "Vol",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "some" + filterName + "Vol",
						},
					},
				},
			},
		},
	}
	return st.MakePod().PVC(fmt.Sprintf("someNon%sVol", filterName)).PVC(fmt.Sprintf("some%sVol", filterName)).Obj()
}

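The rewritten helpers build claim names with fmt.Sprintf rather than string concatenation, which assumes fmt is in this file's import block (the import hunk is not shown here). Call sites are unchanged; the EBS cases above, for instance, still pass the filter type straight through:

	// Assuming ebsVolumeFilterType is the string "EBS", this yields a pod
	// whose single PVC is named "someEBSVol" — the same name the old
	// "some" + filterName + "Vol" concatenation produced.
	pod := onePVCPod(ebsVolumeFilterType)
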
@ -17,10 +17,11 @@ limitations under the License.
package queuesort

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/scheduler/framework"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestLess(t *testing.T) {
@ -37,55 +38,31 @@ func TestLess(t *testing.T) {
		{
			name: "p1.priority less than p2.priority",
			p1: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &lowPriority,
					},
				}),
				PodInfo: framework.NewPodInfo(st.MakePod().Priority(lowPriority).Obj()),
			},
			p2: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
			},
			expected: false, // p2 should be ahead of p1 in the queue
		},
		{
			name: "p1.priority greater than p2.priority",
			p1: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo: framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
			},
			p2: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &lowPriority,
					},
				}),
				PodInfo: framework.NewPodInfo(st.MakePod().Priority(lowPriority).Obj()),
			},
			expected: true, // p1 should be ahead of p2 in the queue
		},
		{
			name: "equal priority. p1 is added to schedulingQ earlier than p2",
			p1: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo:   framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
				Timestamp: t1,
			},
			p2: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo:   framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
				Timestamp: t2,
			},
			expected: true, // p1 should be ahead of p2 in the queue
@ -93,19 +70,11 @@ func TestLess(t *testing.T) {
		{
			name: "equal priority. p2 is added to schedulingQ earlier than p1",
			p1: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo:   framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
				Timestamp: t2,
			},
			p2: &framework.QueuedPodInfo{
				PodInfo: framework.NewPodInfo(&v1.Pod{
					Spec: v1.PodSpec{
						Priority: &highPriority,
					},
				}),
				PodInfo:   framework.NewPodInfo(st.MakePod().Priority(highPriority).Obj()),
				Timestamp: t1,
			},
			expected: false, // p2 should be ahead of p1 in the queue

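The four cases exercise the ordering contract spelled out in the inline comments: the higher-priority pod sorts ahead, and equal priorities fall back to the earlier queue-add timestamp. A self-contained sketch of that contract (illustrative only; the plugin's real comparator is its Less method operating on *framework.QueuedPodInfo):

	// less is an illustrative stand-in for the comparison the cases expect:
	// higher priority wins, ties go to the pod that was queued first.
	func less(prio1, prio2 int32, added1, added2 time.Time) bool {
		if prio1 != prio2 {
			return prio1 > prio2
		}
		return added1.Before(added2)
	}
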
@ -33,6 +33,7 @@ import (
	"k8s.io/kubernetes/pkg/scheduler/framework"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	"k8s.io/utils/pointer"
)

@ -63,10 +64,10 @@ func TestSelectorSpreadScore(t *testing.T) {
		"baz": "blah",
	}
	zone1Spec := v1.PodSpec{
		NodeName: "machine1",
		NodeName: "node1",
	}
	zone2Spec := v1.PodSpec{
		NodeName: "machine2",
		NodeName: "node2",
	}
	tests := []struct {
		pod *v1.Pod
@ -81,122 +82,122 @@ func TestSelectorSpreadScore(t *testing.T) {
	}{
		{
			pod: new(v1.Pod),
			nodes: []string{"machine1", "machine2"},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			nodes: []string{"node1", "node2"},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "nothing scheduled",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{{Spec: zone1Spec}},
			nodes: []string{"machine1", "machine2"},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{st.MakePod().Node("node1").Obj()},
			nodes: []string{"node1", "node2"},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "no services",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
			nodes: []string{"machine1", "machine2"},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{st.MakePod().Labels(labels2).Node("node1").Obj()},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "different services",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
			name: "two pods, one service pod",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node1").Namespace(metav1.NamespaceDefault).Obj(),
				st.MakePod().Labels(labels1).Node("node1").Namespace("ns1").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
				st.MakePod().Labels(labels2).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
			name: "five pods, one service pod in no namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
			pod: st.MakePod().Labels(labels1).Namespace(metav1.NamespaceDefault).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
			name: "four pods, one service pod in default namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
			pod: st.MakePod().Labels(labels1).Namespace("ns1").Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node1").Namespace(metav1.NamespaceDefault).Obj(),
				st.MakePod().Labels(labels1).Node("node1").Namespace("ns2").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Namespace("ns1").Obj(),
				st.MakePod().Labels(labels2).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
			name: "five pods, one service pod in specific namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name: "three pods, two service pods on different machines",
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "three pods, two service pods on different nodes",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 0}},
			name: "four pods, three service pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pod: st.MakePod().Labels(labels1).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				st.MakePod().Labels(labels2).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node1").Obj(),
				st.MakePod().Labels(labels1).Node("node2").Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
			name: "service with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
				st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rcs: []*v1.ReplicationController{
				{ObjectMeta: metav1.ObjectMeta{Name: "rc1", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}},
				{ObjectMeta: metav1.ObjectMeta{Name: "rc2", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"bar": "foo"}}},
@ -204,163 +205,162 @@ func TestSelectorSpreadScore(t *testing.T) {
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels1. This means that we assume we want to
			// spread pod2 and pod3, and not pod1.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "service with partial pod label matches with service and replication controller",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
				st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			rss: []*apps.ReplicaSet{
				{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rs1"}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}},
				{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rs2"}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"bar": "foo"}}}},
			},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "service with partial pod label matches with service and replica set",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1}},
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
				st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
				st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			sss: []*apps.StatefulSet{
				{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "ss1"}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}},
				{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "ss2"}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"bar": "foo"}}}},
			},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "service with partial pod label matches with service and statefulset",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("rc3", rcKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("rc3", rcKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rc2", rcKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rcs: []*v1.ReplicationController{{
				ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rc3"},
				Spec:       v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			// Taken together Service and Replication Controller should match no pods.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "disjoined service and replication controller matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: map[string]string{"foo": "bar", "bar": "foo"},
				OwnerReferences: controllerRef("rs3", rsKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("rs3", rsKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rs2", rsKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			rss: []*apps.ReplicaSet{
				{ObjectMeta: metav1.ObjectMeta{Name: "rs3", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "disjoined service and replica set matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ss3", ssKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("ss3", ssKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("ss2", ssKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			sss: []*apps.StatefulSet{
				{ObjectMeta: metav1.ObjectMeta{Name: "ss3", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			name: "disjoined service and stateful set matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rc2", rcKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rcs: []*v1.ReplicationController{{ObjectMeta: metav1.ObjectMeta{Name: "rc1", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
			// Both Nodes have one pod from the given RC, hence both get 0 score.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "Replication controller with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rs2", rsKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rss: []*apps.ReplicaSet{{ObjectMeta: metav1.ObjectMeta{Name: "rs1", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "Replica set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("ss2", ssKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss1", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "StatefulSet with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc3", rcKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc3", rcKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rc2", rcKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rc1", rcKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rcs: []*v1.ReplicationController{{ObjectMeta: metav1.ObjectMeta{Name: "rc3", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
			name: "Another replication controller with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs3", rsKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs3", rsKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("rs2", rsKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("rs1", rsKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			rss: []*apps.ReplicaSet{{ObjectMeta: metav1.ObjectMeta{Name: "rs3", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
			name: "Another replication set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss3", ssKind)}},
			pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss3", ssKind).Obj(),
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("ss2", ssKind)}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
				st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss3", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
			name: "Another stateful set with partial pod label matches",
		},
		{
@ -385,9 +385,9 @@ func TestSelectorSpreadScore(t *testing.T) {
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss1", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
			name: "Another statefulset with TopologySpreadConstraints set in pod",
		},
	}
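A pattern worth noting in the expectations above: within each case the scores are consistent with scaling against the most-loaded node, roughly score = MaxNodeScore * (maxCount - count) / maxCount, where count is the number of matching pods on a node. In "four pods, three service pods", node1 holds one of a maximum of two matching pods, giving 100 * (2-1)/2 = 50, while node2 holds two and gets 0 — exactly the expected list. This is an inference from the table values, not a statement of the plugin's full scoring formula; the zone-aware test below layers zone weighting on top (hence the 33 in its expectations).
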
@ -458,12 +458,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
		"baz": "blah",
	}

	const nodeMachine1Zone1 = "machine1.zone1"
	const nodeMachine1Zone2 = "machine1.zone2"
	const nodeMachine2Zone2 = "machine2.zone2"
	const nodeMachine1Zone3 = "machine1.zone3"
	const nodeMachine2Zone3 = "machine2.zone3"
	const nodeMachine3Zone3 = "machine3.zone3"
	const nodeMachine1Zone1 = "node1.zone1"
	const nodeMachine1Zone2 = "node1.zone2"
	const nodeMachine2Zone2 = "node2.zone2"
	const nodeMachine1Zone3 = "node1.zone3"
	const nodeMachine2Zone3 = "node2.zone3"
	const nodeMachine3Zone3 = "node3.zone3"

	buildNodeLabels := func(failureDomain string) map[string]string {
		labels := map[string]string{
@ -555,7 +555,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
			services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
				{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on node
				{Name: nodeMachine2Zone2, Score: 33}, // Already have pod in zone
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},

@ -32,28 +32,21 @@ import (
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	plugintesting "k8s.io/kubernetes/pkg/scheduler/framework/plugins/testing"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestGCEDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "foo",
					},
				},
	volState := v1.Volume{
		VolumeSource: v1.VolumeSource{
			GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
				PDName: "foo",
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "bar",
					},
				},
	volState2 := v1.Volume{
		VolumeSource: v1.VolumeSource{
			GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
				PDName: "bar",
			},
		},
	}
@ -66,9 +59,9 @@ func TestGCEDiskConflicts(t *testing.T) {
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
		{&v1.Pod{}, framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "one state", nil},
		{st.MakePod().Volume(volState).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), false, "same state", errStatus},
		{st.MakePod().Volume(volState2).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "different state", nil},
	}

	for _, test := range tests {
@ -85,25 +78,17 @@ func TestGCEDiskConflicts(t *testing.T) {
}

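For readers new to the wrapper, a minimal sketch (not part of the diff) of the equivalence these disk-conflict tests rely on, assuming st.MakePod().Volume(v) appends the given v1.Volume to the pod's spec:

	// Sketch only. Both expressions build a pod carrying the same volume:
	podA := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{volState}}}
	podB := st.MakePod().Volume(volState).Obj()
	// podA and podB are interchangeable for the conflict checks in these tests.
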
func TestAWSDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: "foo",
					},
				},
	volState := v1.Volume{
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: "foo",
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: "bar",
					},
				},
	volState2 := v1.Volume{
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: "bar",
			},
		},
	}
@ -116,9 +101,9 @@ func TestAWSDiskConflicts(t *testing.T) {
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
		{&v1.Pod{}, framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "one state", nil},
		{st.MakePod().Volume(volState).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), false, "same state", errStatus},
		{st.MakePod().Volume(volState2).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "different state", nil},
	}

	for _, test := range tests {
@ -135,31 +120,23 @@ func TestAWSDiskConflicts(t *testing.T) {
}

func TestRBDDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						CephMonitors: []string{"a", "b"},
						RBDPool: "foo",
						RBDImage: "bar",
						FSType: "ext4",
					},
				},
	volState := v1.Volume{
		VolumeSource: v1.VolumeSource{
			RBD: &v1.RBDVolumeSource{
				CephMonitors: []string{"a", "b"},
				RBDPool: "foo",
				RBDImage: "bar",
				FSType: "ext4",
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						CephMonitors: []string{"c", "d"},
						RBDPool: "foo",
						RBDImage: "bar",
						FSType: "ext4",
					},
				},
	volState2 := v1.Volume{
		VolumeSource: v1.VolumeSource{
			RBD: &v1.RBDVolumeSource{
				CephMonitors: []string{"c", "d"},
				RBDPool: "foo",
				RBDImage: "bar",
				FSType: "ext4",
			},
		},
	}
@ -172,9 +149,9 @@ func TestRBDDiskConflicts(t *testing.T) {
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
		{&v1.Pod{}, framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "one state", nil},
		{st.MakePod().Volume(volState).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), false, "same state", errStatus},
		{st.MakePod().Volume(volState2).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "different state", nil},
	}

	for _, test := range tests {
@ -191,31 +168,23 @@ func TestRBDDiskConflicts(t *testing.T) {
}

func TestISCSIDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					ISCSI: &v1.ISCSIVolumeSource{
						TargetPortal: "127.0.0.1:3260",
						IQN: "iqn.2016-12.server:storage.target01",
						FSType: "ext4",
						Lun: 0,
					},
				},
	volState := v1.Volume{
		VolumeSource: v1.VolumeSource{
			ISCSI: &v1.ISCSIVolumeSource{
				TargetPortal: "127.0.0.1:3260",
				IQN: "iqn.2016-12.server:storage.target01",
				FSType: "ext4",
				Lun: 0,
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					ISCSI: &v1.ISCSIVolumeSource{
						TargetPortal: "127.0.0.1:3260",
						IQN: "iqn.2017-12.server:storage.target01",
						FSType: "ext4",
						Lun: 0,
					},
				},
	volState2 := v1.Volume{
		VolumeSource: v1.VolumeSource{
			ISCSI: &v1.ISCSIVolumeSource{
				TargetPortal: "127.0.0.1:3260",
				IQN: "iqn.2017-12.server:storage.target01",
				FSType: "ext4",
				Lun: 0,
			},
		},
	}
@ -228,9 +197,9 @@ func TestISCSIDiskConflicts(t *testing.T) {
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
		{&v1.Pod{}, framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "one state", nil},
		{st.MakePod().Volume(volState).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), false, "same state", errStatus},
		{st.MakePod().Volume(volState2).Obj(), framework.NewNodeInfo(st.MakePod().Volume(volState).Obj()), true, "different state", nil},
	}

	for _, test := range tests {
@ -249,44 +218,10 @@ func TestISCSIDiskConflicts(t *testing.T) {
func TestAccessModeConflicts(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)()

	podWithReadWriteOncePodPVC := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			// Required for querying lister for PVCs in the same namespace.
			Namespace: "default",
			Name: "pod-with-rwop",
		},
		Spec: v1.PodSpec{
			NodeName: "node-1",
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "claim-with-rwop",
						},
					},
				},
			},
		},
	}
	podWithReadWriteManyPVC := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			// Required for querying lister for PVCs in the same namespace.
			Namespace: "default",
			Name: "pod-with-rwx",
		},
		Spec: v1.PodSpec{
			NodeName: "node-1",
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "claim-with-rwx",
						},
					},
				},
			},
		},
	}
	// Required for querying lister for PVCs in the same namespace.
	podWithReadWriteOncePodPVC := st.MakePod().Name("pod-with-rwop").Namespace(metav1.NamespaceDefault).PVC("claim-with-rwop").Node("node-1").Obj()
	// Required for querying lister for PVCs in the same namespace.
	podWithReadWriteManyPVC := st.MakePod().Name("pod-with-rwx").Namespace(metav1.NamespaceDefault).PVC("claim-with-rwx").Node("node-1").Obj()

	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{

@ -26,24 +26,11 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: "default"},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: pv,
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc,
						},
					},
				},
			},
		},
	}
	return st.MakePod().Name(pod).Namespace(metav1.NamespaceDefault).PVC(pvc).Obj()
}

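One nuance worth noting, as a hedged sketch (names illustrative): the removed helper set Volume.Name from its pv argument, while the PVC wrapper appears to attach the same claim source without a volume name; the tests here do not seem to read Volume.Name.

	// Sketch only. The claim source both forms attach:
	src := v1.VolumeSource{
		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc1"},
	}
	oldStyle := v1.Volume{Name: "pv1", VolumeSource: src}          // old helper kept a Name
	newStyle := st.MakePod().PVC("pvc1").Obj().Spec.Volumes[0]     // wrapper form; Name assumed empty
	_, _ = oldStyle, newStyle
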
func TestSingleZone(t *testing.T) {
@ -100,9 +87,7 @@ func TestSingleZone(t *testing.T) {
	}{
		{
			name: "pod without volume",
			Pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "pod_1", Namespace: "default"},
			},
			Pod: st.MakePod().Name("pod_1").Namespace(metav1.NamespaceDefault).Obj(),
			Node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "host1",

67 pkg/scheduler/internal/cache/cache_test.go vendored
@ -24,8 +24,6 @@ import (
	"testing"
	"time"

	st "k8s.io/kubernetes/pkg/scheduler/testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
@ -33,6 +31,7 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

@ -745,24 +744,11 @@ func TestExpireAddUpdatePod(t *testing.T) {
}

func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
	req := v1.ResourceList{
		v1.ResourceEphemeralStorage: resource.MustParse(ephemeralStorage),
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default-namespace",
			Name: "pod-with-ephemeral-storage",
			UID: types.UID("pod-with-ephemeral-storage"),
	return st.MakePod().Name("pod-with-ephemeral-storage").Namespace("default-namespace").UID("pod-with-ephemeral-storage").Req(
		map[v1.ResourceName]string{
			v1.ResourceEphemeralStorage: ephemeralStorage,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Resources: v1.ResourceRequirements{
					Requests: req,
				},
			}},
			NodeName: nodeName,
		},
	}
	).Node(nodeName).Obj()
}

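A quick usage sketch; since Req is assumed to parse each value with resource.MustParse (as the removed code did), the quantity string must be well-formed:

	// Sketch only: builds a pod requesting 100Mi of ephemeral storage on node-0.
	pod := makePodWithEphemeralStorage("node-0", "100Mi")
	_ = pod // an invalid quantity such as "100XYZ" would panic inside MustParse
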
func TestEphemeralStorageResource(t *testing.T) {
@ -1202,16 +1188,8 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
	// Create a few pods for tests.
	var pods []*v1.Pod
	for i := 0; i < 20; i++ {
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("test-pod%v", i),
				Namespace: "test-ns",
				UID: types.UID(fmt.Sprintf("test-puid%v", i)),
			},
			Spec: v1.PodSpec{
				NodeName: fmt.Sprintf("test-node%v", i%10),
			},
		}
		pod := st.MakePod().Name(fmt.Sprintf("test-pod%v", i)).Namespace("test-ns").UID(fmt.Sprintf("test-puid%v", i)).
			Node(fmt.Sprintf("test-node%v", i%10)).Obj()
		pods = append(pods, pod)
	}

@ -1823,36 +1801,23 @@ type testingMode interface {
}

func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, ports []v1.ContainerPort) *v1.Pod {
	req := v1.ResourceList{}
	req := make(map[v1.ResourceName]string)
	if cpu != "" {
		req = v1.ResourceList{
			v1.ResourceCPU: resource.MustParse(cpu),
			v1.ResourceMemory: resource.MustParse(mem),
		}
		req[v1.ResourceCPU] = cpu
		req[v1.ResourceMemory] = mem

		if extended != "" {
			parts := strings.Split(extended, ":")
			if len(parts) != 2 {
				t.Fatalf("Invalid extended resource string: \"%s\"", extended)
			}
			req[v1.ResourceName(parts[0])] = resource.MustParse(parts[1])
			req[v1.ResourceName(parts[0])] = parts[1]
		}
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: types.UID(objName),
			Namespace: "node_info_cache_test",
			Name: objName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Resources: v1.ResourceRequirements{
					Requests: req,
				},
				Ports: ports,
			}},
			NodeName: nodeName,
		},
	}
	podWrapper := st.MakePod().Name(objName).Namespace("node_info_cache_test").UID(objName).Node(nodeName).Containers([]v1.Container{
		st.MakeContainer().Name("container").Image("pause").Resources(req).ContainerPort(ports).Obj(),
	})
	return podWrapper.Obj()
}

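A sketch of how the two wrappers compose (values illustrative): MakeContainer builds a single v1.Container, and Containers installs the whole slice, unlike Container(image), which appends one auto-named entry.

	// Sketch only.
	c := st.MakeContainer().Name("app").Image("pause").
		Resources(map[v1.ResourceName]string{v1.ResourceCPU: "100m"}).Obj()
	pod := st.MakePod().Name("p").UID("p").Node("node-0").
		Containers([]v1.Container{c}).Obj()
	_ = pod
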
func setupCacheOf1kNodes30kPods(b *testing.B) Cache {

@ -622,7 +622,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
// matching label is added, the unschedulable pod is moved to activeQ.
func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
	affinityPod := st.MakePod().Name("afp").Namespace("ns1").UID("upns1").Annotation("annot2", "val2").Priority(mediumPriority).NominatedNodeName("node1").PodAffinityExists("service", "region", st.PodAffinityWithRequiredReq).Obj()
	labelPod := st.MakePod().Name("lbp").Namespace(affinityPod.Namespace).Label("service", "securityscan").Node("machine1").Obj()
	labelPod := st.MakePod().Name("lbp").Namespace(affinityPod.Namespace).Label("service", "securityscan").Node("node1").Obj()

	c := testingclock.NewFakeClock(time.Now())
	m := map[framework.ClusterEvent]sets.String{AssignedPodAdd: sets.NewString("fakePlugin")}

@ -320,27 +320,27 @@ func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, po

func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
	nodes := []runtime.Object{
		st.MakeNode().Name("machine1").UID("machine1").Obj(),
		st.MakeNode().Name("machine2").UID("machine2").Obj(),
		st.MakeNode().Name("machine3").UID("machine3").Obj(),
		st.MakeNode().Name("node1").UID("node1").Obj(),
		st.MakeNode().Name("node2").UID("node2").Obj(),
		st.MakeNode().Name("node3").UID("node3").Obj(),
	}
	pods := []*v1.Pod{
		st.MakePod().Name("pod1").UID("pod1").SchedulerName("match-machine3").Obj(),
		st.MakePod().Name("pod2").UID("pod2").SchedulerName("match-machine2").Obj(),
		st.MakePod().Name("pod3").UID("pod3").SchedulerName("match-machine2").Obj(),
		st.MakePod().Name("pod4").UID("pod4").SchedulerName("match-machine3").Obj(),
		st.MakePod().Name("pod1").UID("pod1").SchedulerName("match-node3").Obj(),
		st.MakePod().Name("pod2").UID("pod2").SchedulerName("match-node2").Obj(),
		st.MakePod().Name("pod3").UID("pod3").SchedulerName("match-node2").Obj(),
		st.MakePod().Name("pod4").UID("pod4").SchedulerName("match-node3").Obj(),
	}
	wantBindings := map[string]string{
		"pod1": "machine3",
		"pod2": "machine2",
		"pod3": "machine2",
		"pod4": "machine3",
		"pod1": "node3",
		"pod2": "node2",
		"pod3": "node2",
		"pod4": "node3",
	}
	wantControllers := map[string]string{
		"pod1": "match-machine3",
		"pod2": "match-machine2",
		"pod3": "match-machine2",
		"pod4": "match-machine3",
		"pod1": "match-node3",
		"pod2": "match-node2",
		"pod3": "match-node2",
		"pod4": "match-node3",
	}

	// Set up scheduler for the 3 nodes.
@ -361,7 +361,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
		profile.NewRecorderFactory(broadcaster),
		ctx.Done(),
		WithProfiles(
			schedulerapi.KubeSchedulerProfile{SchedulerName: "match-machine2",
			schedulerapi.KubeSchedulerProfile{SchedulerName: "match-node2",
				Plugins: &schedulerapi.Plugins{
					Filter: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
@ -370,12 +370,12 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
				PluginConfig: []schedulerapi.PluginConfig{
					{
						Name: "FakeNodeSelector",
						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"machine2"}`)},
						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node2"}`)},
					},
				},
			},
			schedulerapi.KubeSchedulerProfile{
				SchedulerName: "match-machine3",
				SchedulerName: "match-node3",
				Plugins: &schedulerapi.Plugins{
					Filter: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
@ -384,7 +384,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
				PluginConfig: []schedulerapi.PluginConfig{
					{
						Name: "FakeNodeSelector",
						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"machine3"}`)},
						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node3"}`)},
					},
				},
			},
@ -445,7 +445,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
}

func TestSchedulerScheduleOne(t *testing.T) {
	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
	client := clientsetfake.NewSimpleClientset(&testNode)
	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
	errS := errors.New("scheduler")
@ -641,7 +641,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := internalcache.New(100*time.Millisecond, ctx.Done())
	pod := podWithPort("pod.Name", "", 8080)
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
	scache.AddNode(&node)

	fns := []st.RegisterPluginFunc{
@ -706,7 +706,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := internalcache.New(10*time.Minute, ctx.Done())
	firstPod := podWithPort("pod.Name", "", 8080)
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
	scache.AddNode(&node)
	fns := []st.RegisterPluginFunc{
		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
@ -790,7 +790,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
	var nodes []*v1.Node
	var objects []runtime.Object
	for i := 0; i < 100; i++ {
		uid := fmt.Sprintf("machine%v", i)
		uid := fmt.Sprintf("node%v", i)
		node := v1.Node{
			ObjectMeta: metav1.ObjectMeta{Name: uid, UID: types.UID(uid)},
			Status: v1.NodeStatus{
@ -877,7 +877,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
				AllBound: true,
			},
			expectAssumeCalled: true,
			expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
			eventReason: "Scheduled",
		},
		{
@ -910,7 +910,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{},
			expectAssumeCalled: true,
			expectBindCalled: true,
			expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
			eventReason: "Scheduled",
		},
		{
@ -1034,11 +1034,7 @@ func TestSchedulerBinding(t *testing.T) {

	for _, test := range table {
		t.Run(test.name, func(t *testing.T) {
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: test.podName,
				},
			}
			pod := st.MakePod().Name(test.podName).Obj()
			defaultBound := false
			client := clientsetfake.NewSimpleClientset(pod)
			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
@ -1239,13 +1235,7 @@ func TestUpdatePod(t *testing.T) {
				return true, &v1.Pod{}, nil
			})

			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "foo"},
				Status: v1.PodStatus{
					Conditions: test.currentPodConditions,
					NominatedNodeName: test.currentNominatedNodeName,
				},
			}
			pod := st.MakePod().Name("foo").NominatedNodeName(test.currentNominatedNodeName).Conditions(test.currentPodConditions).Obj()

			if err := updatePod(cs, pod, test.newPodCondition, test.newNominatingInfo); err != nil {
				t.Fatalf("Error calling update: %v", err)
@ -1277,33 +1267,33 @@ func TestSelectHost(t *testing.T) {
		{
			name: "unique properly ordered scores",
			list: []framework.NodeScore{
				{Name: "machine1.1", Score: 1},
				{Name: "machine2.1", Score: 2},
				{Name: "node1.1", Score: 1},
				{Name: "node2.1", Score: 2},
			},
			possibleHosts: sets.NewString("machine2.1"),
			possibleHosts: sets.NewString("node2.1"),
			expectsErr: false,
		},
		{
			name: "equal scores",
			list: []framework.NodeScore{
				{Name: "machine1.1", Score: 1},
				{Name: "machine1.2", Score: 2},
				{Name: "machine1.3", Score: 2},
				{Name: "machine2.1", Score: 2},
				{Name: "node1.1", Score: 1},
				{Name: "node1.2", Score: 2},
				{Name: "node1.3", Score: 2},
				{Name: "node2.1", Score: 2},
			},
			possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
			possibleHosts: sets.NewString("node1.2", "node1.3", "node2.1"),
			expectsErr: false,
		},
		{
			name: "out of order scores",
			list: []framework.NodeScore{
				{Name: "machine1.1", Score: 3},
				{Name: "machine1.2", Score: 3},
				{Name: "machine2.1", Score: 2},
				{Name: "machine3.1", Score: 1},
				{Name: "machine1.3", Score: 3},
				{Name: "node1.1", Score: 3},
				{Name: "node1.2", Score: 3},
				{Name: "node2.1", Score: 2},
				{Name: "node3.1", Score: 1},
				{Name: "node1.3", Score: 3},
			},
			possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
			possibleHosts: sets.NewString("node1.1", "node1.2", "node1.3"),
			expectsErr: false,
		},
		{
@ -1494,7 +1484,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
				extenders = append(extenders, &tt.extenders[ii])
			}

			pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
			pod := st.MakePod().Name("1").UID("1").Obj()
			got, err := findNodesThatPassExtenders(extenders, pod, tt.nodes, tt.filteredNodesStatuses)
			if tt.expectsErr {
				if err == nil {
@ -1534,16 +1524,16 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterFilterPlugin("FalseFilter", st.NewFalseFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
			nodes: []string{"node1", "node2"},
			pod: st.MakePod().Name("2").UID("2").Obj(),
			name: "test 1",
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
				Pod: st.MakePod().Name("2").UID("2").Obj(),
				NumAllNodes: 2,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
						"machine1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
						"machine2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
						"node1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
						"node2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
					},
					UnschedulablePlugins: sets.NewString("FalseFilter"),
				},
@ -1555,22 +1545,22 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
			wantNodes: sets.NewString("machine1", "machine2"),
			nodes: []string{"node1", "node2"},
			pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
			wantNodes: sets.NewString("node1", "node2"),
			name: "test 2",
			wErr: nil,
		},
		{
			// Fits on a machine where the pod ID matches the machine name
			// Fits on a node where the pod ID matches the node name
			registerPlugins: []st.RegisterPluginFunc{
				st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
				st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}},
			wantNodes: sets.NewString("machine2"),
			nodes: []string{"node1", "node2"},
			pod: st.MakePod().Name("node2").UID("node2").Obj(),
			wantNodes: sets.NewString("node2"),
			name: "test 3",
			wErr: nil,
		},
@ -1582,7 +1572,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3", "2", "1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
			pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
			wantNodes: sets.NewString("3"),
			name: "test 4",
			wErr: nil,
@ -1595,7 +1585,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3", "2", "1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
			pod: st.MakePod().Name("2").UID("2").Obj(),
			wantNodes: sets.NewString("2"),
			name: "test 5",
			wErr: nil,
@ -1609,7 +1599,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3", "2", "1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
			pod: st.MakePod().Name("2").UID("2").Obj(),
			wantNodes: sets.NewString("1"),
			name: "test 6",
			wErr: nil,
@ -1623,10 +1613,10 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3", "2", "1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
			pod: st.MakePod().Name("2").UID("2").Obj(),
			name: "test 7",
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
				Pod: st.MakePod().Name("2").UID("2").Obj(),
				NumAllNodes: 3,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -1647,21 +1637,13 @@ func TestSchedulerSchedulePod(t *testing.T) {
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")},
					Spec: v1.PodSpec{
						NodeName: "2",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
				st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(),
			},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
			pod: st.MakePod().Name("2").UID("2").Obj(),
			nodes: []string{"1", "2"},
			name: "test 8",
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
				Pod: st.MakePod().Name("2").UID("2").Obj(),
				NumAllNodes: 2,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -1680,28 +1662,15 @@
				st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			pvcs: []v1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault},
					Spec: v1.PersistentVolumeClaimSpec{VolumeName: "existingPV"},
				},
			},
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							VolumeSource: v1.VolumeSource{
								PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
									ClaimName: "existingPVC",
								},
							},
						},
					},
				},
			},
			wantNodes: sets.NewString("machine1", "machine2"),
			pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
			wantNodes: sets.NewString("node1", "node2"),
			name: "existing PVC",
			wErr: nil,
		},
@ -1713,42 +1682,16 @@
				st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							VolumeSource: v1.VolumeSource{
								PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
									ClaimName: "unknownPVC",
								},
							},
						},
					},
				},
			},
			name: "unknown PVC",
			nodes: []string{"node1", "node2"},
			pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
			name: "unknown PVC",
			wErr: &framework.FitError{
				Pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
					Spec: v1.PodSpec{
						Volumes: []v1.Volume{
							{
								VolumeSource: v1.VolumeSource{
									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
										ClaimName: "unknownPVC",
									},
								},
							},
						},
					},
				},
				Pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
				NumAllNodes: 2,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
						"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin(volumebinding.Name),
						"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin(volumebinding.Name),
						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin(volumebinding.Name),
						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin(volumebinding.Name),
					},
					UnschedulablePlugins: sets.NewString(volumebinding.Name),
				},
@ -1762,43 +1705,17 @@
				st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			nodes: []string{"node1", "node2"},
			pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							VolumeSource: v1.VolumeSource{
								PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
									ClaimName: "existingPVC",
								},
							},
						},
					},
				},
			},
			name: "deleted PVC",
			pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
			name: "deleted PVC",
			wErr: &framework.FitError{
				Pod: &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore"), Namespace: v1.NamespaceDefault},
					Spec: v1.PodSpec{
						Volumes: []v1.Volume{
							{
								VolumeSource: v1.VolumeSource{
									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
										ClaimName: "existingPVC",
									},
								},
							},
						},
					},
				},
				Pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
				NumAllNodes: 2,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
						"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin(volumebinding.Name),
						"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin(volumebinding.Name),
						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin(volumebinding.Name),
						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin(volumebinding.Name),
					},
					UnschedulablePlugins: sets.NewString(volumebinding.Name),
				},
@ -1813,7 +1730,7 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"2", "1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
			pod: st.MakePod().Name("2").Obj(),
			name: "test error with priority map",
			wErr: fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)),
		},
@ -1829,39 +1746,19 @@
				),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2"},
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "p", UID: types.UID("p"), Labels: map[string]string{"foo": ""}},
				Spec: v1.PodSpec{
					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew: 1,
							TopologyKey: "hostname",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key: "foo",
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
						},
			nodes: []string{"node1", "node2"},
			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
				MatchExpressions: []metav1.LabelSelectorRequirement{
					{
						Key: "foo",
						Operator: metav1.LabelSelectorOpExists,
					},
				},
			},
			}, nil).Obj(),
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1"), Labels: map[string]string{"foo": ""}},
					Spec: v1.PodSpec{
						NodeName: "machine1",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
				st.MakePod().Name("pod1").UID("pod1").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
			},
			wantNodes: sets.NewString("machine2"),
			wantNodes: sets.NewString("node2"),
			wErr: nil,
		},
		{
@ -1876,57 +1773,21 @@
				),
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"machine1", "machine2", "machine3"},
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "p", UID: types.UID("p"), Labels: map[string]string{"foo": ""}},
				Spec: v1.PodSpec{
					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew: 2,
							TopologyKey: "hostname",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key: "foo",
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
						},
			nodes: []string{"node1", "node2", "node3"},
			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
				MatchExpressions: []metav1.LabelSelectorRequirement{
					{
						Key: "foo",
						Operator: metav1.LabelSelectorOpExists,
					},
				},
			},
			}, nil).Obj(),
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1a", UID: types.UID("pod1a"), Labels: map[string]string{"foo": ""}},
					Spec: v1.PodSpec{
						NodeName: "machine1",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pod1b", UID: types.UID("pod1b"), Labels: map[string]string{"foo": ""}},
					Spec: v1.PodSpec{
						NodeName: "machine1",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2"), Labels: map[string]string{"foo": ""}},
					Spec: v1.PodSpec{
						NodeName: "machine2",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
				st.MakePod().Name("pod1a").UID("pod1a").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
				st.MakePod().Name("pod1b").UID("pod1b").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
				st.MakePod().Name("pod2").UID("pod2").Label("foo", "").Node("node2").Phase(v1.PodRunning).Obj(),
			},
			wantNodes: sets.NewString("machine2", "machine3"),
			wantNodes: sets.NewString("node2", "node3"),
			wErr: nil,
		},
		{
@ -1941,10 +1802,10 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
			pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
			wantNodes: nil,
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
				Pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
				NumAllNodes: 1,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -1966,10 +1827,10 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"3"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
			pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
			wantNodes: nil,
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
				Pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
				NumAllNodes: 1,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -1991,7 +1852,7 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"1", "2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
			pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
			wantNodes: nil,
			wErr: nil,
		},
@ -2006,10 +1867,10 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"1", "2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
			wantNodes: nil,
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
				Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
				NumAllNodes: 2,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -2031,7 +1892,7 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"1", "2"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
			wantNodes: nil,
			wErr: fmt.Errorf(`running PreFilter plugin "FakePreFilter": %w`, errors.New("injected error status")),
		},
@ -2054,7 +1915,7 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"node1", "node2", "node3"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
			wantNodes: sets.NewString("node2"),
			wantEvaluatedNodes: pointer.Int32Ptr(1),
		},
@ -2077,9 +1938,9 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"node1", "node2", "node3"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
				Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
				NumAllNodes: 3,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -2106,9 +1967,9 @@
				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
			},
			nodes: []string{"node1"},
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
			wErr: &framework.FitError{
				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-prefilter", UID: types.UID("test-prefilter")}},
				Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
				NumAllNodes: 1,
				Diagnosis: framework.Diagnosis{
					NodeToStatusMap: framework.NodeToStatusMap{
@ -2247,7 +2108,7 @@ func TestFindFitSomeError(t *testing.T) {
		t.Fatal(err)
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
	pod := st.MakePod().Name("1").UID("1").Obj()
	_, diagnosis, err := scheduler.findNodesThatFitPod(context.Background(), fwk, framework.NewCycleState(), pod)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
@ -2286,12 +2147,12 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
	}{
		{
			name: "nominated pods have lower priority, predicate is called once",
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}, Spec: v1.PodSpec{Priority: &highPriority}},
			pod: st.MakePod().Name("1").UID("1").Priority(highPriority).Obj(),
			expectedCount: 1,
		},
		{
			name: "nominated pods have higher priority, predicate is called twice",
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}, Spec: v1.PodSpec{Priority: &lowPriority}},
			pod: st.MakePod().Name("1").UID("1").Priority(lowPriority).Obj(),
			expectedCount: 2,
		},
	}
@ -2324,7 +2185,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
	if err := scheduler.Cache.UpdateSnapshot(scheduler.nodeInfoSnapshot); err != nil {
		t.Fatal(err)
	}
	fwk.AddNominatedPod(framework.NewPodInfo(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: "nominated"}, Spec: v1.PodSpec{Priority: &midPriority}}),
	fwk.AddNominatedPod(framework.NewPodInfo(st.MakePod().UID("nominated").Priority(midPriority).Obj()),
		&framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "1"})

	_, _, err = scheduler.findNodesThatFitPod(context.Background(), fwk, framework.NewCycleState(), test.pod)
@ -2340,7 +2201,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {

// The point of this test is to show that you:
// - get the same priority for a zero-request pod as for a pod with the defaults requests,
// both when the zero-request pod is already on the machine and when the zero-request pod
// both when the zero-request pod is already on the node and when the zero-request pod
// is the one being scheduled.
// - don't get the same score no matter what we schedule.
func TestZeroRequest(t *testing.T) {
@ -2351,7 +2212,7 @@ func TestZeroRequest(t *testing.T) {
		},
	}
	noResources1 := noResources
	noResources1.NodeName = "machine1"
	noResources1.NodeName = "node1"
	// A pod with the same resources as a 0-request pod gets by default as its resources (for spreading).
	small := v1.PodSpec{
		Containers: []v1.Container{
@ -2368,7 +2229,7 @@ func TestZeroRequest(t *testing.T) {
		},
	}
	small2 := small
	small2.NodeName = "machine2"
	small2.NodeName = "node2"
	// A larger pod.
	large := v1.PodSpec{
		Containers: []v1.Container{
@ -2385,9 +2246,9 @@ func TestZeroRequest(t *testing.T) {
		},
	}
	large1 := large
	large1.NodeName = "machine1"
	large1.NodeName = "node1"
	large2 := large
	large2.NodeName = "machine2"
	large2.NodeName = "node2"
	tests := []struct {
		pod *v1.Pod
		pods []*v1.Pod
@ -2396,12 +2257,12 @@ func TestZeroRequest(t *testing.T) {
		expectedScore int64
	}{
		// The point of these next two tests is to show you get the same priority for a zero-request pod
		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine
		// as for a pod with the defaults requests, both when the zero-request pod is already on the node
		// and when the zero-request pod is the one being scheduled.
		{
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of zero-request pod with machine with zero-request pod",
			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of zero-request pod with node with zero-request pod",
			pods: []*v1.Pod{
				{Spec: large1}, {Spec: noResources1},
				{Spec: large2}, {Spec: small2},
@ -2410,8 +2271,8 @@ func TestZeroRequest(t *testing.T) {
		},
		{
			pod: &v1.Pod{Spec: small},
			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of nonzero-request pod with machine with zero-request pod",
			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of nonzero-request pod with node with zero-request pod",
			pods: []*v1.Pod{
				{Spec: large1}, {Spec: noResources1},
				{Spec: large2}, {Spec: small2},
@ -2421,8 +2282,8 @@ func TestZeroRequest(t *testing.T) {
		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
		{
			pod: &v1.Pod{Spec: large},
			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of larger pod with machine with zero-request pod",
			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
			name: "test priority of larger pod with node with zero-request pod",
			pods: []*v1.Pod{
				{Spec: large1}, {Spec: noResources1},
				{Spec: large2}, {Spec: small2},
@ -2665,31 +2526,11 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
}

func podWithID(id, desiredHost string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: id,
			UID: types.UID(id),
		},
		Spec: v1.PodSpec{
			NodeName: desiredHost,
			SchedulerName: testSchedulerName,
		},
	}
	return st.MakePod().Name(id).UID(id).Node(desiredHost).SchedulerName(testSchedulerName).Obj()
}

func deletingPod(id string) *v1.Pod {
	deletionTimestamp := metav1.Now()
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: id,
			UID: types.UID(id),
			DeletionTimestamp: &deletionTimestamp,
		},
		Spec: v1.PodSpec{
			NodeName: "",
			SchedulerName: testSchedulerName,
		},
	}
	return st.MakePod().Name(id).UID(id).Terminating().Node("").SchedulerName(testSchedulerName).Obj()
}

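Hedged sketch: Terminating() is assumed to set a non-nil DeletionTimestamp on the wrapped pod, matching the field the removed literal set by hand.

	// Sketch only.
	p := st.MakePod().Name("p").UID("p").Terminating().Obj()
	if p.DeletionTimestamp == nil {
		panic("expected Terminating() to set DeletionTimestamp")
	}
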
func podWithPort(id, desiredHost string, port int) *v1.Pod {
@ -2843,7 +2684,7 @@ func setupTestScheduler(ctx context.Context, queuedPodStore *clientcache.FIFO, c
}

func setupTestSchedulerWithVolumeBinding(ctx context.Context, volumeBinder volumebinding.SchedulerVolumeBinder, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	pod := podWithID("foo", "")
	pod.Namespace = "foo-ns"

@ -40,6 +40,7 @@ import (
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
	"k8s.io/kubernetes/pkg/scheduler/profile"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	testingclock "k8s.io/utils/clock/testing"
)

@ -226,7 +227,7 @@ func TestSchedulerCreation(t *testing.T) {
}

func TestDefaultErrorFunc(t *testing.T) {
	testPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}}
	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
	testPodUpdated := testPod.DeepCopy()
	testPodUpdated.Labels = map[string]string{"foo": ""}

@ -307,7 +308,7 @@ func TestDefaultErrorFunc(t *testing.T) {
func TestDefaultErrorFunc_NodeNotFound(t *testing.T) {
	nodeFoo := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
	nodeBar := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
	testPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}}
	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
	tests := []struct {
		name string
		nodes []v1.Node
@ -374,7 +375,7 @@ func TestDefaultErrorFunc_PodAlreadyBound(t *testing.T) {
	defer close(stopCh)

	nodeFoo := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
	testPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}, Spec: v1.PodSpec{NodeName: "foo"}}
	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Node("foo").Obj()

	client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}})
	informerFactory := informers.NewSharedInformerFactory(client, 0)

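Because Obj() yields a plain *v1.Pod, wrapper-built pods slot directly into existing fixtures; a sketch mirroring the lines above:

	// Sketch only.
	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
	client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
	_ = client
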
@ -136,6 +136,56 @@ func (s *LabelSelectorWrapper) Obj() *metav1.LabelSelector {
|
||||
return &s.LabelSelector
|
||||
}
|
||||
|
||||
// ContainerWrapper wraps a Container inside.
|
||||
type ContainerWrapper struct{ v1.Container }
|
||||
|
||||
// MakeContainer creates a Container wrapper.
|
||||
func MakeContainer() *ContainerWrapper {
|
||||
return &ContainerWrapper{v1.Container{}}
|
||||
}
|
||||
|
||||
// Obj returns the inner Container.
|
||||
func (c *ContainerWrapper) Obj() v1.Container {
|
||||
return c.Container
|
||||
}
|
||||
|
||||
// Name sets `n` as the name of the inner Container.
|
||||
func (c *ContainerWrapper) Name(n string) *ContainerWrapper {
|
||||
c.Container.Name = n
|
||||
return c
|
||||
}
|
||||
|
||||
// Image sets `image` as the image of the inner Container.
|
||||
func (c *ContainerWrapper) Image(image string) *ContainerWrapper {
|
||||
c.Container.Image = image
|
||||
return c
|
||||
}
|
||||
|
||||
// HostPort sets `hostPort` as the host port of the inner Container.
|
||||
func (c *ContainerWrapper) HostPort(hostPort int32) *ContainerWrapper {
|
||||
c.Container.Ports = []v1.ContainerPort{{HostPort: hostPort}}
|
||||
return c
|
||||
}
|
||||
|
||||
// ContainerPort sets `ports` as the ports of the inner Container.
|
||||
func (c *ContainerWrapper) ContainerPort(ports []v1.ContainerPort) *ContainerWrapper {
|
||||
c.Container.Ports = ports
|
||||
return c
|
||||
}
|
||||
|
||||
// Resources sets the container resources to the given resource map.
|
||||
func (c *ContainerWrapper) Resources(resMap map[v1.ResourceName]string) *ContainerWrapper {
|
||||
res := v1.ResourceList{}
|
||||
for k, v := range resMap {
|
||||
res[k] = resource.MustParse(v)
|
||||
}
|
||||
c.Container.Resources = v1.ResourceRequirements{
|
||||
Requests: res,
|
||||
Limits: res,
|
||||
}
|
||||
return c
|
||||
}
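For illustration only (not part of the diff): a minimal sketch of chaining the new ContainerWrapper, assuming the st alias for k8s.io/kubernetes/pkg/scheduler/testing used in the tests above; the name, image, and quantities are invented. As the Resources body shows, the container gets identical requests and limits:

	c := st.MakeContainer().Name("app").Image("pause").
		Resources(map[v1.ResourceName]string{v1.ResourceCPU: "100m", v1.ResourceMemory: "64Mi"}).
		Obj() // c is a plain v1.Container, ready to drop into a PodSpec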

// PodWrapper wraps a Pod inside.
type PodWrapper struct{ v1.Pod }

@ -188,10 +238,14 @@ func (p *PodWrapper) OwnerReference(name string, gvk schema.GroupVersionKind) *P

// Container appends a container into PodSpec of the inner pod.
func (p *PodWrapper) Container(s string) *PodWrapper {
	p.Spec.Containers = append(p.Spec.Containers, v1.Container{
		Name:  fmt.Sprintf("con%d", len(p.Spec.Containers)),
		Image: s,
	})
	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(s).Obj())
	return p
}

// Containers sets `containers` to the PodSpec of the inner pod.
func (p *PodWrapper) Containers(containers []v1.Container) *PodWrapper {
	p.Spec.Containers = containers
	return p
}
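A usage sketch (image names invented): Container auto-names entries con0, con1, ... as it appends, while Containers replaces the whole slice —

	p := st.MakePod().Name("p").Container("image-a").Container("image-b").Obj()
	// p.Spec.Containers[0].Name == "con0"; p.Spec.Containers[1].Name == "con1"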

@ -201,15 +255,6 @@ func (p *PodWrapper) Priority(val int32) *PodWrapper {
	return p
}

// Annotation adds a pair of (key, value) to a pod's Annotations.
func (p *PodWrapper) Annotation(key, value string) *PodWrapper {
	if p.Annotations == nil {
		p.Annotations = make(map[string]string)
	}
	p.Annotations[key] = value
	return p
}

// CreationTimestamp sets the inner pod's CreationTimestamp.
func (p *PodWrapper) CreationTimestamp(t metav1.Time) *PodWrapper {
	p.ObjectMeta.CreationTimestamp = t
@ -281,12 +326,24 @@ func (p *PodWrapper) NominatedNodeName(n string) *PodWrapper {
	return p
}

// Phase sets `phase` as .status.Phase of the inner pod.
func (p *PodWrapper) Phase(phase v1.PodPhase) *PodWrapper {
	p.Status.Phase = phase
	return p
}

// Condition adds a `condition(Type, Status, Reason)` to .Status.Conditions.
func (p *PodWrapper) Condition(t v1.PodConditionType, s v1.ConditionStatus, r string) *PodWrapper {
	p.Status.Conditions = append(p.Status.Conditions, v1.PodCondition{Type: t, Status: s, Reason: r})
	return p
}

// Conditions appends `conditions` to .status.Conditions of the inner pod.
func (p *PodWrapper) Conditions(conditions []v1.PodCondition) *PodWrapper {
	p.Status.Conditions = append(p.Status.Conditions, conditions...)
	return p
}
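Illustrative only (the reason argument is left empty for brevity): stamping status with the new helpers —

	p := st.MakePod().Name("p").Phase(v1.PodRunning).
		Condition(v1.PodReady, v1.ConditionTrue, "").Obj()
	// p.Status.Phase == v1.PodRunning, with one Ready=True condition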

// Toleration creates a toleration (with the operator Exists)
// and injects into the inner pod.
func (p *PodWrapper) Toleration(key string) *PodWrapper {
@ -300,9 +357,14 @@ func (p *PodWrapper) Toleration(key string) *PodWrapper {
// HostPort creates a container with a hostPort valued `hostPort`,
// and injects into the inner pod.
func (p *PodWrapper) HostPort(port int32) *PodWrapper {
	p.Spec.Containers = append(p.Spec.Containers, v1.Container{
		Ports: []v1.ContainerPort{{HostPort: port}},
	})
	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name("container").Image("pause").HostPort(port).Obj())
	return p
}

// ContainerPort creates a container with ports valued `ports`,
// and injects into the inner pod.
func (p *PodWrapper) ContainerPort(ports []v1.ContainerPort) *PodWrapper {
	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name("container").Image("pause").ContainerPort(ports).Obj())
	return p
}

@ -317,6 +379,12 @@ func (p *PodWrapper) PVC(name string) *PodWrapper {
	return p
}

// Volume creates a volume and injects into the inner pod.
func (p *PodWrapper) Volume(volume v1.Volume) *PodWrapper {
	p.Spec.Volumes = append(p.Spec.Volumes, volume)
	return p
}
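A hedged sketch (volume name and source assumed; PVC's body is elided by the hunk above, so its exact volume shape is an assumption here): combining PVC with the new Volume helper —

	p := st.MakePod().Name("p").PVC("claim-1").
		Volume(v1.Volume{Name: "scratch", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}).
		Obj()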

// PodAffinityKind represents different kinds of PodAffinity.
type PodAffinityKind int

@ -337,9 +405,9 @@ const (
	PodAntiAffinityWithRequiredPreferredReq
)

// PodAffinityExists creates an PodAffinity with the operator "Exists"
// PodAffinity creates a PodAffinity with topology key and label selector
// and injects into the inner pod.
func (p *PodWrapper) PodAffinityExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
func (p *PodWrapper) PodAffinity(topologyKey string, labelSelector *metav1.LabelSelector, kind PodAffinityKind) *PodWrapper {
	if kind == NilPodAffinity {
		return p
	}
@ -350,7 +418,6 @@ func (p *PodWrapper) PodAffinityExists(labelKey, topologyKey string, kind PodAff
	if p.Spec.Affinity.PodAffinity == nil {
		p.Spec.Affinity.PodAffinity = &v1.PodAffinity{}
	}
	labelSelector := MakeLabelSelector().Exists(labelKey).Obj()
	term := v1.PodAffinityTerm{LabelSelector: labelSelector, TopologyKey: topologyKey}
	switch kind {
	case PodAffinityWithRequiredReq:
@ -376,9 +443,9 @@ func (p *PodWrapper) PodAffinityExists(labelKey, topologyKey string, kind PodAff
	return p
}

// PodAntiAffinityExists creates an PodAntiAffinity with the operator "Exists"
// PodAntiAffinity creates a PodAntiAffinity with topology key and label selector
// and injects into the inner pod.
func (p *PodWrapper) PodAntiAffinityExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
func (p *PodWrapper) PodAntiAffinity(topologyKey string, labelSelector *metav1.LabelSelector, kind PodAffinityKind) *PodWrapper {
	if kind == NilPodAffinity {
		return p
	}
@ -389,7 +456,6 @@ func (p *PodWrapper) PodAntiAffinityExists(labelKey, topologyKey string, kind Po
	if p.Spec.Affinity.PodAntiAffinity == nil {
		p.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{}
	}
	labelSelector := MakeLabelSelector().Exists(labelKey).Obj()
	term := v1.PodAffinityTerm{LabelSelector: labelSelector, TopologyKey: topologyKey}
	switch kind {
	case PodAntiAffinityWithRequiredReq:
@ -415,6 +481,70 @@ func (p *PodWrapper) PodAntiAffinityExists(labelKey, topologyKey string, kind Po
	return p
}

// PodAffinityExists creates a PodAffinity with the operator "Exists"
// and injects into the inner pod.
func (p *PodWrapper) PodAffinityExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().Exists(labelKey).Obj()
	p.PodAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAntiAffinityExists creates a PodAntiAffinity with the operator "Exists"
// and injects into the inner pod.
func (p *PodWrapper) PodAntiAffinityExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().Exists(labelKey).Obj()
	p.PodAntiAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAffinityNotExists creates a PodAffinity with the operator "NotExists"
// and injects into the inner pod.
func (p *PodWrapper) PodAffinityNotExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().NotExist(labelKey).Obj()
	p.PodAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAntiAffinityNotExists creates a PodAntiAffinity with the operator "NotExists"
// and injects into the inner pod.
func (p *PodWrapper) PodAntiAffinityNotExists(labelKey, topologyKey string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().NotExist(labelKey).Obj()
	p.PodAntiAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAffinityIn creates a PodAffinity with the operator "In"
// and injects into the inner pod.
func (p *PodWrapper) PodAffinityIn(labelKey, topologyKey string, vals []string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().In(labelKey, vals).Obj()
	p.PodAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAntiAffinityIn creates a PodAntiAffinity with the operator "In"
// and injects into the inner pod.
func (p *PodWrapper) PodAntiAffinityIn(labelKey, topologyKey string, vals []string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().In(labelKey, vals).Obj()
	p.PodAntiAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAffinityNotIn creates a PodAffinity with the operator "NotIn"
// and injects into the inner pod.
func (p *PodWrapper) PodAffinityNotIn(labelKey, topologyKey string, vals []string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().NotIn(labelKey, vals).Obj()
	p.PodAffinity(topologyKey, labelSelector, kind)
	return p
}

// PodAntiAffinityNotIn creates a PodAntiAffinity with the operator "NotIn"
// and injects into the inner pod.
func (p *PodWrapper) PodAntiAffinityNotIn(labelKey, topologyKey string, vals []string, kind PodAffinityKind) *PodWrapper {
	labelSelector := MakeLabelSelector().NotIn(labelKey, vals).Obj()
	p.PodAntiAffinity(topologyKey, labelSelector, kind)
	return p
}
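A usage sketch (label keys, topology keys, and values invented): every operator-specific helper above now reduces to one PodAffinity/PodAntiAffinity call with a prebuilt selector, so they compose freely —

	p := st.MakePod().Name("p").
		PodAffinityIn("app", "kubernetes.io/hostname", []string{"web"}, st.PodAffinityWithRequiredReq).
		PodAntiAffinityExists("batch", "topology.kubernetes.io/zone", st.PodAntiAffinityWithRequiredReq).
		Obj()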

// SpreadConstraint constructs a TopologySpreadConstraint object and injects
// into the inner pod.
func (p *PodWrapper) SpreadConstraint(maxSkew int, tpKey string, mode v1.UnsatisfiableConstraintAction, selector *metav1.LabelSelector, minDomains *int32) *PodWrapper {
@ -429,12 +559,37 @@ func (p *PodWrapper) SpreadConstraint(maxSkew int, tpKey string, mode v1.Unsatis
	return p
}

// Label sets a {k,v} pair to the inner pod.
// Label sets a {k,v} pair to the inner pod label.
func (p *PodWrapper) Label(k, v string) *PodWrapper {
	if p.Labels == nil {
		p.Labels = make(map[string]string)
	if p.ObjectMeta.Labels == nil {
		p.ObjectMeta.Labels = make(map[string]string)
	}
	p.Labels[k] = v
	p.ObjectMeta.Labels[k] = v
	return p
}

// Labels sets all {k,v} pairs provided by `labels` to the inner pod labels.
func (p *PodWrapper) Labels(labels map[string]string) *PodWrapper {
	for k, v := range labels {
		p.Label(k, v)
	}
	return p
}

// Annotation sets a {k,v} pair to the inner pod annotation.
func (p *PodWrapper) Annotation(key, value string) *PodWrapper {
	if p.ObjectMeta.Annotations == nil {
		p.ObjectMeta.Annotations = make(map[string]string)
	}
	p.ObjectMeta.Annotations[key] = value
	return p
}

// Annotations sets all {k,v} pairs provided by `annotations` to the inner pod annotations.
func (p *PodWrapper) Annotations(annotations map[string]string) *PodWrapper {
	for k, v := range annotations {
		p.Annotation(k, v)
	}
	return p
}
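Sketch (keys and values invented): bulk metadata with the new plural helpers, which simply loop over the singular setters shown above —

	p := st.MakePod().Name("p").
		Labels(map[string]string{"app": "web", "tier": "frontend"}).
		Annotations(map[string]string{"owner": "scheduler-tests"}).
		Obj()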

@ -444,18 +599,19 @@ func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
		return p
	}

	res := v1.ResourceList{}
	for k, v := range resMap {
		res[k] = resource.MustParse(v)
	}
	p.Spec.Containers = append(p.Spec.Containers, v1.Container{
		Name:  fmt.Sprintf("con%d", len(p.Spec.Containers)),
		Image: imageutils.GetPauseImageName(),
		Resources: v1.ResourceRequirements{
			Requests: res,
			Limits:   res,
		},
	})
	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).Resources(resMap).Obj())
	return p
}

// InitReq adds a new init container to the inner pod with given resource map.
func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
	if len(resMap) == 0 {
		return p
	}

	name := fmt.Sprintf("init-con%d", len(p.Spec.InitContainers))
	p.Spec.InitContainers = append(p.Spec.InitContainers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).Resources(resMap).Obj())
	return p
}
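Illustrative only (resource values assumed): after this cleanup, a pod with one regular and one init container request collapses to two calls —

	p := st.MakePod().Name("p").
		Req(map[v1.ResourceName]string{v1.ResourceCPU: "500m"}).
		InitReq(map[v1.ResourceName]string{v1.ResourceMemory: "128Mi"}).
		Obj()
	// containers are auto-named con0 and init-con0, each with requests == limits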

@ -465,7 +621,7 @@ func (p *PodWrapper) PreemptionPolicy(policy v1.PreemptionPolicy) *PodWrapper {
	return p
}

// Overhead sets the give resourcelist to the inner pod
// Overhead sets the given ResourceList to the inner pod
func (p *PodWrapper) Overhead(rl v1.ResourceList) *PodWrapper {
	p.Spec.Overhead = rl
	return p
}

@ -25,7 +25,7 @@ import (
// For each of these resources, a pod that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
// of computing priority only. This ensures that when scheduling zero-request pods, such
// pods will not all be scheduled to the machine with the smallest in-use request,
// pods will not all be scheduled to the node with the smallest in-use request,
// and that when scheduling regular pods, such pods will not see zero-request pods as
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
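A sketch of the defaults this comment describes (the constant names match the upstream scheduler util package, but the exact values below are assumptions for illustration, not authoritative):

	// Zero-request pods are scored as if each container had asked for these amounts:
	const (
		DefaultMilliCPURequest int64 = 100               // 0.1 core
		DefaultMemoryRequest   int64 = 200 * 1024 * 1024 // 200 MB
	)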