Move predicate types from algorithm to predicates
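The gist of the change, as it shows up at call sites in the test file below: the metadata and failure-reason types become package-local to predicates, so the algorithm. qualifier disappears, and the test helper is renamed from PredicateMetadata to GetPredicateMetadata. A minimal sketch of the new call pattern, assuming the package and imports shown in this diff (checkFit is a hypothetical wrapper added here for illustration, not part of this commit):

package predicates

import (
	v1 "k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// checkFit is a hypothetical wrapper: GetPredicateMetadata (formerly
// PredicateMetadata) returns the package-local PredicateMetadata type, and
// predicates report []PredicateFailureReason with no algorithm. qualifier.
func checkFit(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
	meta := GetPredicateMetadata(pod, nil) // nil node-info map, as the updated tests do
	return PodFitsResources(pod, meta, nodeInfo)
}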
@@ -34,7 +34,6 @@ import (
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 "k8s.io/kubernetes/pkg/features"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -91,7 +90,7 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
 return pod
 }
 
-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
 pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
 return pm.GetMetadata(p, nodeInfo)
 }
@@ -102,7 +101,7 @@ func TestPodFitsResources(t *testing.T) {
 nodeInfo *schedulernodeinfo.NodeInfo
 fits bool
 name string
-reasons []algorithm.PredicateFailureReason
+reasons []PredicateFailureReason
 ignoredExtendedResources sets.String
 }{
 {
@@ -118,7 +117,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
 fits: false,
 name: "too many resources fails",
-reasons: []algorithm.PredicateFailureReason{
+reasons: []PredicateFailureReason{
 NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
 NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20),
 },
@@ -129,7 +128,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
 fits: false,
 name: "too many resources fails due to init container cpu",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
@@ -137,7 +136,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
 fits: false,
 name: "too many resources fails due to highest init container cpu",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
@@ -145,7 +144,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
 fits: false,
 name: "too many resources fails due to init container memory",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@@ -153,7 +152,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
 fits: false,
 name: "too many resources fails due to highest init container memory",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -182,7 +181,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
 fits: false,
 name: "one resource memory fits",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
 },
 {
 pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@@ -190,7 +189,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
 fits: false,
 name: "one resource cpu fits",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
 },
 {
 pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -225,7 +224,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
 fits: false,
 name: "extended resource capacity enforced",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -234,7 +233,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
 fits: false,
 name: "extended resource capacity enforced for init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
 },
 {
 pod: newResourcePod(
@@ -243,7 +242,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
 fits: false,
 name: "extended resource allocatable enforced",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -252,7 +251,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
 fits: false,
 name: "extended resource allocatable enforced for init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
 },
 {
 pod: newResourcePod(
@@ -262,7 +261,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
 fits: false,
 name: "extended resource allocatable enforced for multiple containers",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -281,7 +280,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
 fits: false,
 name: "extended resource allocatable enforced for multiple init containers",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
 },
 {
 pod: newResourcePod(
@@ -290,7 +289,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
 fits: false,
 name: "extended resource allocatable enforced for unknown resource",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -299,7 +298,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
 fits: false,
 name: "extended resource allocatable enforced for unknown resource for init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
 },
 {
 pod: newResourcePod(
@@ -308,7 +307,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
 fits: false,
 name: "kubernetes.io resource capacity enforced",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -317,7 +316,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
 fits: false,
 name: "kubernetes.io resource capacity enforced for init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
 },
 {
 pod: newResourcePod(
@@ -326,7 +325,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 fits: false,
 name: "hugepages resource capacity enforced",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -335,7 +334,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 fits: false,
 name: "hugepages resource capacity enforced for init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 },
 {
 pod: newResourcePod(
@@ -345,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
 fits: false,
 name: "hugepages resource allocatable enforced for multiple containers",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
 },
 {
 pod: newResourcePod(
@@ -363,7 +362,7 @@ func TestPodFitsResources(t *testing.T) {
 node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 test.nodeInfo.SetNode(&node)
 RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources)
-meta := PredicateMetadata(test.pod, nil)
+meta := GetPredicateMetadata(test.pod, nil)
 fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
@@ -382,7 +381,7 @@ func TestPodFitsResources(t *testing.T) {
 nodeInfo *schedulernodeinfo.NodeInfo
 fits bool
 name string
-reasons []algorithm.PredicateFailureReason
+reasons []PredicateFailureReason
 }{
 {
 pod: &v1.Pod{},
@@ -390,7 +389,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
 fits: false,
 name: "even without specified resources predicate fails when there's no space for additional pod",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -398,7 +397,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
 fits: false,
 name: "even if both resources fit predicate fails when there's no space for additional pod",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -406,7 +405,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
 fits: false,
 name: "even for equal edge case predicate fails when there's no space for additional pod",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
 },
 {
 pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -414,14 +413,14 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
 fits: false,
 name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
-reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
 },
 }
 for _, test := range notEnoughPodsTests {
 t.Run(test.name, func(t *testing.T) {
 node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 test.nodeInfo.SetNode(&node)
-fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -439,7 +438,7 @@ func TestPodFitsResources(t *testing.T) {
 nodeInfo *schedulernodeinfo.NodeInfo
 fits bool
 name string
-reasons []algorithm.PredicateFailureReason
+reasons []PredicateFailureReason
 }{
 {
 pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -447,7 +446,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
 fits: false,
 name: "due to container scratch disk",
-reasons: []algorithm.PredicateFailureReason{
+reasons: []PredicateFailureReason{
 NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
 },
 },
@@ -464,7 +463,7 @@ func TestPodFitsResources(t *testing.T) {
 newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
 fits: false,
 name: "storage ephemeral local storage request exceeds allocatable",
-reasons: []algorithm.PredicateFailureReason{
+reasons: []PredicateFailureReason{
 NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20),
 },
 },
@@ -481,7 +480,7 @@ func TestPodFitsResources(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 test.nodeInfo.SetNode(&node)
-fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -538,13 +537,13 @@ func TestPodFitsHost(t *testing.T) {
 name: "host doesn't match",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}
+expectedFailureReasons := []PredicateFailureReason{ErrPodNotMatchHostName}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(test.node)
-fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+fits, reasons, err := PodFitsHost(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -680,11 +679,11 @@ func TestPodFitsHostPorts(t *testing.T) {
 name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}
+expectedFailureReasons := []PredicateFailureReason{ErrPodNotFitsHostPorts}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-fits, reasons, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := PodFitsHostPorts(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -732,11 +731,11 @@ func TestGCEDiskConflicts(t *testing.T) {
 {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
 {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -787,11 +786,11 @@ func TestAWSDiskConflicts(t *testing.T) {
 {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
 {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -848,11 +847,11 @@ func TestRBDDiskConflicts(t *testing.T) {
 {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
 {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -909,11 +908,11 @@ func TestISCSIDiskConflicts(t *testing.T) {
 {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
 {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -1603,7 +1602,7 @@ func TestPodFitsSelector(t *testing.T) {
 name: "Pod with two terms: both matchFields and matchExpressions do not match",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
+expectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -1614,7 +1613,7 @@ func TestPodFitsSelector(t *testing.T) {
 nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(&node)
 
-fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+fits, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -1674,7 +1673,7 @@ func TestNodeLabelPresence(t *testing.T) {
 name: "all labels match, presence false",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}
+expectedFailureReasons := []PredicateFailureReason{ErrNodeLabelPresenceViolated}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -1683,7 +1682,7 @@ func TestNodeLabelPresence(t *testing.T) {
 nodeInfo.SetNode(&node)
 
 labelChecker := NodeLabelChecker{test.labels, test.presence}
-fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -1823,7 +1822,7 @@ func TestServiceAffinity(t *testing.T) {
 name: "service pod on different node, multiple labels, all match",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}
+expectedFailureReasons := []PredicateFailureReason{ErrServiceAffinityViolated}
 for _, test := range tests {
 testIt := func(skipPrecompute bool) {
 t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
@@ -1839,7 +1838,7 @@ func TestServiceAffinity(t *testing.T) {
 precompute(pm)
 }
 })
-if pmeta, ok := (PredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
+if pmeta, ok := (GetPredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
 fits, reasons, err := predicate(test.pod, pmeta, nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
@@ -1885,7 +1884,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 fits bool
 name string
 wErr error
-reasons []algorithm.PredicateFailureReason
+reasons []PredicateFailureReason
 }{
 {
 pod: &v1.Pod{},
@@ -1909,7 +1908,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 },
 fits: false,
 wErr: nil,
-reasons: []algorithm.PredicateFailureReason{
+reasons: []PredicateFailureReason{
 NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10),
 NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20),
 },
@@ -1928,7 +1927,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 },
 fits: false,
 wErr: nil,
-reasons: []algorithm.PredicateFailureReason{ErrPodNotMatchHostName},
+reasons: []PredicateFailureReason{ErrPodNotMatchHostName},
 name: "host not match",
 },
 {
@@ -1940,14 +1939,14 @@ func TestRunGeneralPredicates(t *testing.T) {
 },
 fits: false,
 wErr: nil,
-reasons: []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts},
+reasons: []PredicateFailureReason{ErrPodNotFitsHostPorts},
 name: "hostport conflict",
 },
 }
 for _, test := range resourceTests {
 t.Run(test.name, func(t *testing.T) {
 test.nodeInfo.SetNode(test.node)
-fits, reasons, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := GeneralPredicates(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -1976,7 +1975,7 @@ func TestInterPodAffinity(t *testing.T) {
 node *v1.Node
 fits bool
 name string
-expectFailureReasons []algorithm.PredicateFailureReason
+expectFailureReasons []PredicateFailureReason
 }{
 {
 pod: new(v1.Pod),
@@ -2076,7 +2075,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2107,7 +2106,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2204,7 +2203,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2371,7 +2370,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2444,7 +2443,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2476,7 +2475,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2512,7 +2511,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2612,7 +2611,7 @@ func TestInterPodAffinity(t *testing.T) {
 node: &node1,
 fits: false,
 name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod",
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
 {
 pod: &v1.Pod{
@@ -2675,7 +2674,7 @@ func TestInterPodAffinity(t *testing.T) {
 },
 node: &node1,
 fits: false,
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms",
 },
 {
@@ -2739,7 +2738,7 @@ func TestInterPodAffinity(t *testing.T) {
 },
 node: &node1,
 fits: false,
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms",
 },
 {
@@ -2814,7 +2813,7 @@ func TestInterPodAffinity(t *testing.T) {
 },
 node: &node1,
 fits: false,
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms",
 },
 {
@@ -2889,7 +2888,7 @@ func TestInterPodAffinity(t *testing.T) {
 },
 node: &node1,
 fits: false,
-expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
+expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms",
 },
 }
@@ -2911,7 +2910,7 @@ func TestInterPodAffinity(t *testing.T) {
 nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
 nodeInfo.SetNode(test.node)
 nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
-fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
+fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
 if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
 t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
 }
@@ -2944,7 +2943,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 pod *v1.Pod
 pods []*v1.Pod
 nodes []v1.Node
-nodesExpectAffinityFailureReasons [][]algorithm.PredicateFailureReason
+nodesExpectAffinityFailureReasons [][]PredicateFailureReason
 fits map[string]bool
 name string
 nometa bool
@@ -2985,7 +2984,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 "machine2": true,
 "machine3": false,
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
 name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules",
 },
 {
@@ -3034,7 +3033,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
 fits: map[string]bool{
 "nodeA": false,
 "nodeB": true,
@@ -3087,7 +3086,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
 fits: map[string]bool{
 "nodeA": true,
 "nodeB": true,
@@ -3125,7 +3124,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
 fits: map[string]bool{
 "nodeA": false,
 "nodeB": false,
@@ -3174,7 +3173,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
@@ -3215,7 +3214,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
 fits: map[string]bool{
 "nodeA": false,
 "nodeB": false,
@@ -3279,7 +3278,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
@@ -3359,7 +3358,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 nil,
@@ -3403,7 +3402,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
 fits: map[string]bool{
 "nodeA": true,
 "nodeB": true,
@@ -3444,7 +3443,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
 fits: map[string]bool{
 "nodeA": true,
 "nodeB": true,
@@ -3507,7 +3506,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
@@ -3568,7 +3567,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
@@ -3621,7 +3620,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
 fits: map[string]bool{
@@ -3675,7 +3674,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
 fits: map[string]bool{
@@ -3727,7 +3726,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
@@ -3782,7 +3781,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
 },
@@ -3869,7 +3868,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
 },
@@ -3925,7 +3924,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {},
 {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
@@ -3986,7 +3985,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 },
-nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
+nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
 {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
 },
@@ -3998,7 +3997,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 },
 }
 
-selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
+selectorExpectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}
 
 for indexTest, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -4023,9 +4022,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 podLister: schedulertesting.FakePodLister(test.pods),
 }
 
-var meta algorithm.PredicateMetadata
+var meta PredicateMetadata
 if !test.nometa {
-meta = PredicateMetadata(test.pod, nodeInfoMap)
+meta = GetPredicateMetadata(test.pod, nodeInfoMap)
 }
 
 fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, nodeInfoMap[node.Name])
@@ -4037,7 +4036,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(&node)
 nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
-fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
+fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -4238,13 +4237,13 @@ func TestPodToleratesTaints(t *testing.T) {
 "but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}
+expectedFailureReasons := []PredicateFailureReason{ErrTaintsTolerationsNotMatch}
 
 for _, test := range podTolerateTaintsTests {
 t.Run(test.name, func(t *testing.T) {
 nodeInfo := schedulernodeinfo.NewNodeInfo()
 nodeInfo.SetNode(&test.node)
-fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
+fits, reasons, err := PodToleratesNodeTaints(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -4352,11 +4351,11 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
 name: "non best-effort pod schedulable on node without memory pressure condition on",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}
+expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderMemoryPressure}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -4426,11 +4425,11 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
 name: "pod not schedulable on node with pressure condition on",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}
+expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderDiskPressure}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -4486,11 +4485,11 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
 name: "pod not schedulable on node with pressure condition on",
 },
 }
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}
+expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderPIDPressure}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, PredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
+fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, GetPredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
 if err != nil {
 t.Fatalf("unexpected error: %v", err)
 }
@@ -4673,7 +4672,7 @@ func TestVolumeZonePredicate(t *testing.T) {
 },
 }
 
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -4767,7 +4766,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
 },
 }
 
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
+expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}
 
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {