Merge pull request #11788 from HaiyangDING/ImproveClarityResource
Improve clarity around PodFitsResources (issue #11453)
@@ -105,6 +105,8 @@ type resourceRequest struct {
 	memory int64
 }
 
+var FailedResourceType string
+
 func getResourceRequest(pod *api.Pod) resourceRequest {
 	result := resourceRequest{}
 	for _, container := range pod.Spec.Containers {
@@ -115,7 +117,7 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 	return result
 }
 
-func CheckPodsExceedingCapacity(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFitting []*api.Pod) {
+func CheckPodsExceedingFreeResources(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
 	totalMilliCPU := capacity.Cpu().MilliValue()
 	totalMemory := capacity.Memory().Value()
 	milliCPURequested := int64(0)
@@ -124,9 +126,14 @@ func CheckPodsExceedingCapacity(pods []*api.Pod, capacity api.ResourceList) (fit
 		podRequest := getResourceRequest(pod)
 		fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
 		fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
-		if !fitsCPU || !fitsMemory {
-			// the pod doesn't fit
-			notFitting = append(notFitting, pod)
+		if !fitsCPU {
+			// the pod doesn't fit due to CPU limit
+			notFittingCPU = append(notFittingCPU, pod)
+			continue
+		}
+		if !fitsMemory {
+			// the pod doesn't fit due to Memory limit
+			notFittingMemory = append(notFittingMemory, pod)
 			continue
 		}
 		// the pod fits
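The loop above now classifies each pod by the first resource it fails to fit instead of collecting everything into a single notFitting slice. A minimal, self-contained sketch of that classification logic follows, using plain stand-in types rather than api.Pod and api.ResourceList (podSpec, checkPodsExceedingFreeResources, and the sample values are hypothetical, for illustration only):

// Simplified sketch of the classification loop introduced in this commit.
package main

import "fmt"

type podSpec struct {
	name     string
	milliCPU int64 // hypothetical stand-in for the pod's CPU request
	memory   int64 // hypothetical stand-in for the pod's memory request
}

func checkPodsExceedingFreeResources(pods []podSpec, totalMilliCPU, totalMemory int64) (fitting, notFittingCPU, notFittingMemory []podSpec) {
	milliCPURequested, memoryRequested := int64(0), int64(0)
	for _, pod := range pods {
		// A zero capacity means "unbounded", exactly as in the diff above.
		fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= pod.milliCPU
		fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= pod.memory
		if !fitsCPU {
			notFittingCPU = append(notFittingCPU, pod)
			continue
		}
		if !fitsMemory {
			notFittingMemory = append(notFittingMemory, pod)
			continue
		}
		// The pod fits: reserve its requests before checking the next pod.
		milliCPURequested += pod.milliCPU
		memoryRequested += pod.memory
		fitting = append(fitting, pod)
	}
	return
}

func main() {
	pods := []podSpec{
		{"a", 500, 1 << 30}, // fits
		{"b", 600, 1 << 30}, // exceeds remaining CPU (only 500m of 1000m left)
		{"c", 100, 4 << 30}, // exceeds remaining memory
	}
	fitting, cpu, mem := checkPodsExceedingFreeResources(pods, 1000, 2<<30)
	fmt.Println(len(fitting), len(cpu), len(mem)) // prints: 1 1 1
}

Note the continue in the CPU branch: a pod short on both resources is reported only under CPU, and a rejected pod reserves nothing, so it does not count against the pods evaluated after it.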
@@ -150,9 +157,20 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	pods := []*api.Pod{}
 	copy(pods, existingPods)
 	pods = append(existingPods, pod)
-	_, exceeding := CheckPodsExceedingCapacity(pods, info.Status.Capacity)
-	if len(exceeding) > 0 || int64(len(pods)) > info.Status.Capacity.Pods().Value() {
+	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, info.Status.Capacity)
+	if int64(len(pods)) > info.Status.Capacity.Pods().Value() {
 		glog.V(4).Infof("Cannot schedule Pod %v, because Node %v is full, running %v out of %v Pods.", pod, node, len(pods)-1, info.Status.Capacity.Pods().Value())
+		FailedResourceType = "PodExceedsMaxPodNumber"
 		return false, nil
 	}
+	if len(exceedingCPU) > 0 {
+		glog.V(4).Infof("Cannot schedule Pod %v, because Node does not have sufficient CPU", pod)
+		FailedResourceType = "PodExceedsFreeCPU"
+		return false, nil
+	}
+	if len(exceedingMemory) > 0 {
+		glog.V(4).Infof("Cannot schedule Pod %v, because Node does not have sufficient Memory", pod)
+		FailedResourceType = "PodExceedsFreeMemory"
+		return false, nil
+	}
 	glog.V(4).Infof("Schedule Pod %v on Node %v is allowed, Node is running only %v out of %v Pods.", pod, node, len(pods)-1, info.Status.Capacity.Pods().Value())
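With this hunk, PodFitsResources records why a pod was rejected by assigning one of three values to the package-level FailedResourceType before returning false. Below is a self-contained sketch of that reporting pattern, with simplified stand-ins for the real scheduler types (node, podFitsResources, and the sample capacities are hypothetical, for illustration only):

// Sketch of the FailedResourceType reporting pattern.
package main

import "fmt"

// failedResourceType mimics the package-level predicates.FailedResourceType.
var failedResourceType string

type node struct{ freePods, freeMilliCPU, freeMemory int64 }

func podFitsResources(n node, milliCPU, memory int64) bool {
	// The checks mirror the order in the diff: pod count, then CPU, then
	// memory, so only the first exceeded resource is recorded.
	if n.freePods < 1 {
		failedResourceType = "PodExceedsMaxPodNumber"
		return false
	}
	if n.freeMilliCPU < milliCPU {
		failedResourceType = "PodExceedsFreeCPU"
		return false
	}
	if n.freeMemory < memory {
		failedResourceType = "PodExceedsFreeMemory"
		return false
	}
	return true
}

func main() {
	n := node{freePods: 10, freeMilliCPU: 200, freeMemory: 1 << 30}
	failedResourceType = "" // reset before each predicate call, as findNodesThatFit does
	if !podFitsResources(n, 500, 2<<30) {
		fmt.Println(failedResourceType) // prints PodExceedsFreeCPU: CPU is checked before memory
	}
}

Because the checks run in a fixed order, a pod that exceeds several limits is reported only for the first one. FailedResourceType is mutable package state, so callers must reset it before each predicate call and read it immediately afterwards, which is exactly what findNodesThatFit does in the next file.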
@@ -116,6 +116,7 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
 	for _, node := range nodes.Items {
 		fits := true
 		for name, predicate := range predicateFuncs {
+			predicates.FailedResourceType = ""
 			fit, err := predicate(pod, machineToPods[node.Name], node.Name)
 			if err != nil {
 				return api.NodeList{}, FailedPredicateMap{}, err
@@ -125,6 +126,10 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
 			if _, found := failedPredicateMap[node.Name]; !found {
 				failedPredicateMap[node.Name] = util.StringSet{}
 			}
+			if predicates.FailedResourceType != "" {
+				failedPredicateMap[node.Name].Insert(predicates.FailedResourceType)
+				break
+			}
 			failedPredicateMap[node.Name].Insert(name)
 			break
 		}
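The effect of these two hunks is that a node failing the resource predicate is recorded in failedPredicateMap under the specific resource reason rather than the generic predicate name. A minimal sketch of that bookkeeping, with stringSet standing in for util.StringSet and the values chosen purely for illustration:

// Sketch of the failed-predicate bookkeeping after this change.
package main

import "fmt"

// stringSet stands in for util.StringSet.
type stringSet map[string]struct{}

func (s stringSet) Insert(v string) { s[v] = struct{}{} }

func main() {
	failedPredicateMap := map[string]stringSet{}
	nodeName, predicateName := "node-1", "PodFitsResources"
	failedResourceType := "PodExceedsFreeMemory" // as set by the failing predicate

	if _, found := failedPredicateMap[nodeName]; !found {
		failedPredicateMap[nodeName] = stringSet{}
	}
	if failedResourceType != "" {
		// record the specific resource reason instead of the predicate name
		failedPredicateMap[nodeName].Insert(failedResourceType)
	} else {
		failedPredicateMap[nodeName].Insert(predicateName)
	}
	fmt.Println(failedPredicateMap) // map[node-1:map[PodExceedsFreeMemory:{}]]
}

Consumers of the failed-predicate map can then tell a CPU shortfall apart from a memory shortfall or a node that is simply full.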