Fix unbounded growth of cached OIRs in sched cache

- Added a schedulercache.Resource.SetOpaque helper (sketched below).
- Amended the kubelet allocatable sync so that when OIRs are removed from capacity
  they are also removed from allocatable (also sketched below).
- Fixes #41861.
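The SetOpaque helper itself is not shown in the diff below. A minimal, hypothetical sketch of what such a helper could look like, assuming the scheduler cache's Resource type keeps its OpaqueIntResources map (not the verbatim upstream implementation):

// Hypothetical sketch only; the actual schedulercache implementation may differ.
// Lazily initialize the opaque-resource map and overwrite the stored value, so
// call sites no longer have to pre-populate the map before comparing against it.
func (r *Resource) SetOpaque(name v1.ResourceName, quantity int64) {
	if r.OpaqueIntResources == nil {
		r.OpaqueIntResources = map[v1.ResourceName]int64{}
	}
	r.OpaqueIntResources[name] = quantity
}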
Connor Doyle
2017-02-27 22:33:14 -08:00
parent e402c5d5e5
commit 8a42189690
3 changed files with 39 additions and 5 deletions
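The kubelet-side allocatable sync mentioned in the commit message lives in one of the other changed files and is not part of the hunks shown below. A rough, hypothetical sketch of the idea, assuming node.Status.Capacity and node.Status.Allocatable are the usual v1.ResourceList maps:

// Hypothetical sketch only: drop any opaque integer resource that is still in
// allocatable but no longer present in capacity, so removed OIRs do not linger.
for name := range node.Status.Allocatable {
	if v1.IsOpaqueIntResourceName(name) {
		if _, ok := node.Status.Capacity[name]; !ok {
			delete(node.Status.Allocatable, name)
		}
	}
}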

@@ -468,6 +468,30 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s
 	return true, nil, nil
 }
 
+// Returns a *schedulercache.Resource that covers the largest width in each
+// resource dimension. Because init-containers run sequentially, we collect the
+// max in each dimension iteratively. In contrast, we sum the resource vectors
+// for regular containers since they run simultaneously.
+//
+// Example:
+//
+// Pod:
+//   InitContainers
+//     IC1:
+//       CPU: 2
+//       Memory: 1G
+//     IC2:
+//       CPU: 2
+//       Memory: 3G
+//   Containers
+//     C1:
+//       CPU: 2
+//       Memory: 1G
+//     C2:
+//       CPU: 1
+//       Memory: 1G
+//
+// Result: CPU: 3, Memory: 3G
 func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 	result := schedulercache.Resource{}
 	for _, container := range pod.Spec.Containers {
@@ -505,10 +529,8 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 		default:
 			if v1.IsOpaqueIntResourceName(rName) {
 				value := rQuantity.Value()
-				// Ensure the opaque resource map is initialized in the result.
-				result.AddOpaque(rName, int64(0))
 				if value > result.OpaqueIntResources[rName] {
-					result.OpaqueIntResources[rName] = value
+					result.SetOpaque(rName, value)
 				}
 			}
 		}
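The hunk above only touches the opaque-resource branch for regular containers; the init-container pass described by the new comment (max per dimension rather than sum) is not part of this diff. A hedged sketch of that pass, assuming the usual Resource fields (MilliCPU, Memory, OpaqueIntResources) and the hypothetical SetOpaque helper sketched earlier:

// Sketch only, not the verbatim upstream code: init containers run sequentially,
// so each dimension keeps the maximum seen so far instead of accumulating a sum.
for _, container := range pod.Spec.InitContainers {
	for rName, rQuantity := range container.Resources.Requests {
		switch rName {
		case v1.ResourceCPU:
			if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
				result.MilliCPU = cpu
			}
		case v1.ResourceMemory:
			if mem := rQuantity.Value(); mem > result.Memory {
				result.Memory = mem
			}
		default:
			if v1.IsOpaqueIntResourceName(rName) {
				if value := rQuantity.Value(); value > result.OpaqueIntResources[rName] {
					result.SetOpaque(rName, value)
				}
			}
		}
	}
}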