rebase master

commit cec2aae1e5 (parent aff056d8a1)
boenn committed 2021-11-25 10:34:43 +08:00
8 changed files with 294 additions and 188 deletions

@@ -174,7 +174,6 @@ func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterSt
if pod.Spec.Overhead != nil && enablePodOverhead {
result.Add(pod.Spec.Overhead)
}
return result
}
@@ -259,11 +258,11 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber
if len(nodeInfo.Pods)+1 > allowedPodNumber {
insufficientResources = append(insufficientResources, InsufficientResource{
-v1.ResourcePods,
-"Too many pods",
-1,
-int64(len(nodeInfo.Pods)),
-int64(allowedPodNumber),
+ResourceName: v1.ResourcePods,
+Reason: "Too many pods",
+Requested: 1,
+Used: int64(len(nodeInfo.Pods)),
+Capacity: int64(allowedPodNumber),
})
}
@@ -276,29 +275,29 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
if podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU - nodeInfo.Requested.MilliCPU) {
insufficientResources = append(insufficientResources, InsufficientResource{
-v1.ResourceCPU,
-"Insufficient cpu",
-podRequest.MilliCPU,
-nodeInfo.Requested.MilliCPU,
-nodeInfo.Allocatable.MilliCPU,
+ResourceName: v1.ResourceCPU,
+Reason: "Insufficient cpu",
+Requested: podRequest.MilliCPU,
+Used: nodeInfo.Requested.MilliCPU,
+Capacity: nodeInfo.Allocatable.MilliCPU,
})
}
if podRequest.Memory > (nodeInfo.Allocatable.Memory - nodeInfo.Requested.Memory) {
insufficientResources = append(insufficientResources, InsufficientResource{
-v1.ResourceMemory,
-"Insufficient memory",
-podRequest.Memory,
-nodeInfo.Requested.Memory,
-nodeInfo.Allocatable.Memory,
+ResourceName: v1.ResourceMemory,
+Reason: "Insufficient memory",
+Requested: podRequest.Memory,
+Used: nodeInfo.Requested.Memory,
+Capacity: nodeInfo.Allocatable.Memory,
})
}
if podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage - nodeInfo.Requested.EphemeralStorage) {
insufficientResources = append(insufficientResources, InsufficientResource{
-v1.ResourceEphemeralStorage,
-"Insufficient ephemeral-storage",
-podRequest.EphemeralStorage,
-nodeInfo.Requested.EphemeralStorage,
-nodeInfo.Allocatable.EphemeralStorage,
+ResourceName: v1.ResourceEphemeralStorage,
+Reason: "Insufficient ephemeral-storage",
+Requested: podRequest.EphemeralStorage,
+Used: nodeInfo.Requested.EphemeralStorage,
+Capacity: nodeInfo.Allocatable.EphemeralStorage,
})
}
@@ -316,11 +315,11 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
}
if rQuant > (nodeInfo.Allocatable.ScalarResources[rName] - nodeInfo.Requested.ScalarResources[rName]) {
insufficientResources = append(insufficientResources, InsufficientResource{
-rName,
-fmt.Sprintf("Insufficient %v", rName),
-podRequest.ScalarResources[rName],
-nodeInfo.Requested.ScalarResources[rName],
-nodeInfo.Allocatable.ScalarResources[rName],
+ResourceName: rName,
+Reason: fmt.Sprintf("Insufficient %v", rName),
+Requested: podRequest.ScalarResources[rName],
+Used: nodeInfo.Requested.ScalarResources[rName],
+Capacity: nodeInfo.Allocatable.ScalarResources[rName],
})
}
}
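
The keyed literals introduced above imply an InsufficientResource struct with the fields ResourceName, Reason, Requested, Used, and Capacity. The following is a minimal, self-contained sketch of that shape and of the difference between the old positional literals and the new keyed ones; the field names and value types are taken from the hunks, while the package layout, the local ResourceName alias, and the main function are illustrative assumptions rather than the plugin's actual source.

package main

import (
	"fmt"
	"reflect"
)

// ResourceName is a local stand-in for the string-based resource-name type
// referenced as v1.ResourceName in the diff; it keeps the sketch compilable
// on its own.
type ResourceName string

// InsufficientResource mirrors the field names and value types used by the
// keyed literals in the hunks above.
type InsufficientResource struct {
	ResourceName ResourceName
	Reason       string
	Requested    int64
	Used         int64
	Capacity     int64
}

func main() {
	// New style: keyed fields are order-independent and stay valid if the
	// struct later gains or reorders fields.
	keyed := InsufficientResource{
		ResourceName: "cpu",
		Reason:       "Insufficient cpu",
		Requested:    2,
		Used:         9,
		Capacity:     10,
	}
	// Old style: positional values must match the declaration order exactly,
	// so any change to the struct definition breaks every literal.
	positional := InsufficientResource{"cpu", "Insufficient cpu", 2, 9, 10}

	fmt.Println(reflect.DeepEqual(keyed, positional)) // true
}

Spelling the fields out is what keeps the table-driven fixtures in the test file below readable, since each expected value can be read against the field it belongs to at a glance.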

@@ -136,41 +136,52 @@ func TestEnoughRequests(t *testing.T) {
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 1, Used: 10, Capacity: 10},
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 1, Used: 20, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}),
@@ -197,17 +208,21 @@ func TestEnoughRequests(t *testing.T) {
pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 2, Used: 9, Capacity: 10},
},
},
{
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
},
},
{
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
@@ -240,36 +255,44 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -277,9 +300,11 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
@@ -296,63 +321,77 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: kubernetesIOResourceA, Reason: getErrReason(kubernetesIOResourceA), Requested: 10, Used: 0, Capacity: 0},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: kubernetesIOResourceB, Reason: getErrReason(kubernetesIOResourceB), Requested: 10, Used: 0, Capacity: 0},
},
},
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -360,9 +399,11 @@ func TestEnoughRequests(t *testing.T) {
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{
{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 6, Used: 2, Capacity: 5},
},
},
{
pod: newResourcePod(
@@ -388,10 +429,12 @@ func TestEnoughRequests(t *testing.T) {
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
),
-nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
-name: "requests + overhead does not fit for memory",
-wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
-wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
+nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
+name: "requests + overhead does not fit for memory",
+wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
+wantInsufficientResources: []InsufficientResource{
+{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 16, Used: 5, Capacity: 20},
+},
},
{
pod: newResourcePod(