fix bugs of container cpu shares when cpu request set to zero

waynepeking348 2022-03-20 21:53:07 +08:00 committed by shaowei.wayne
parent 475f7af1c1
commit 4c87589300
3 changed files with 36 additions and 4 deletions


@@ -63,7 +63,11 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	}
 	// set linux container resources
-	lc.Resources = m.calculateLinuxResources(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = container.Resources.Requests.Cpu()
+	}
+	lc.Resources = m.calculateLinuxResources(cpuRequest, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
 	lc.Resources.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
 		int64(m.machineInfo.MemoryCapacity)))
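
The map-existence check above is needed because v1.ResourceList's Cpu() accessor never returns nil: for a missing entry it hands back a pointer to a zero Quantity, so an unset request and an explicit request of 0 are indistinguishable once converted. A standalone sketch of that behaviour (illustration only, not part of this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	unset := v1.ResourceList{}                                               // no cpu request at all
	explicitZero := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")} // cpu request set to "0"

	// Cpu() returns a non-nil zero Quantity in both cases, so IsZero()
	// cannot tell "unset" apart from "explicitly zero".
	fmt.Println(unset.Cpu().IsZero(), explicitZero.Cpu().IsZero()) // true true

	// Only a map lookup distinguishes the two, which is what the new code does.
	_, hasUnset := unset[v1.ResourceCPU]
	_, hasZero := explicitZero[v1.ResourceCPU]
	fmt.Println(hasUnset, hasZero) // false true
}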
@@ -138,9 +142,9 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
 	// If request is not specified, but limit is, we want request to default to limit.
 	// API server does this for new containers, but we repeat this logic in Kubelet
 	// for containers running on existing Kubernetes clusters.
-	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+	if cpuRequest == nil && !cpuLimit.IsZero() {
 		cpuShares = int64(cm.MilliCPUToShares(cpuLimit.MilliValue()))
-	} else {
+	} else if cpuRequest != nil {
 		// if cpuRequest.Amount is nil, then MilliCPUToShares will return the minimal number
 		// of CPU shares.
		cpuShares = int64(cm.MilliCPUToShares(cpuRequest.MilliValue()))
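
The shares math behind this branch: cm.MilliCPUToShares converts milli-CPUs to cgroup cpu.shares with a hard floor. A simplified stand-in is sketched below (illustration only; the real constants and clamping live in pkg/kubelet/cm):

// Simplified stand-in for cm.MilliCPUToShares: 1024 shares per CPU, floored
// at the kernel-minimum of 2. The real helper also clamps at an upper bound.
func milliCPUToShares(milliCPU int64) int64 {
	const (
		minShares     = 2
		sharesPerCPU  = 1024
		milliCPUToCPU = 1000
	)
	if milliCPU == 0 {
		// Zero milli-CPU maps to the minimum share count rather than 0.
		return minShares
	}
	shares := milliCPU * sharesPerCPU / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return shares
}

With the old IsZero() test, an explicit request of 0 was lumped together with "no request" and inherited the limit's shares; with the nil test, only a truly absent request falls back to the limit, while an explicit 0 goes through the conversion and lands on the floor.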


@@ -260,6 +260,29 @@ func TestCalculateLinuxResources(t *testing.T) {
 				MemoryLimitInBytes: 0,
 			},
 		},
+		{
+			name: "RequestNilCPU",
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 200000,
+				CpuShares: 2048,
+				MemoryLimitInBytes: 0,
+			},
+		},
+		{
+			name: "RequestZeroCPU",
+			cpuReq: resource.MustParse("0"),
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 200000,
+				CpuShares: 2,
+				MemoryLimitInBytes: 0,
+			},
+		},
 	}
 	for _, test := range tests {
 		linuxContainerResources := m.calculateLinuxResources(&test.cpuReq, &test.cpuLim, &test.memLim)
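
Read against the conversion sketched above, the two new cases pin down exactly that distinction: RequestNilCPU has no cpuReq, so shares fall back to the 2-CPU limit (2000m × 1024 / 1000 = 2048), while RequestZeroCPU requests 0 explicitly and lands on the floor of 2. In both cases the 2-CPU limit independently yields CpuQuota 200000 against the default 100000µs CpuPeriod.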


@@ -21,6 +21,7 @@ package kuberuntime
 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 )
@@ -41,7 +42,11 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)

 func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
 	req, lim := resourcehelper.PodRequestsAndLimitsWithoutOverhead(pod)
-	return m.calculateLinuxResources(req.Cpu(), lim.Cpu(), lim.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := req[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = req.Cpu()
+	}
+	return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory())
 }

 func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {