diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
index d12e1d4a681..1e1d59daade 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
@@ -63,7 +63,11 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	}
 
 	// set linux container resources
-	lc.Resources = m.calculateLinuxResources(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = container.Resources.Requests.Cpu()
+	}
+	lc.Resources = m.calculateLinuxResources(cpuRequest, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
 
 	lc.Resources.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
 		int64(m.machineInfo.MemoryCapacity)))
@@ -138,9 +142,9 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
 	// If request is not specified, but limit is, we want request to default to limit.
 	// API server does this for new containers, but we repeat this logic in Kubelet
 	// for containers running on existing Kubernetes clusters.
-	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+	if cpuRequest == nil && !cpuLimit.IsZero() {
 		cpuShares = int64(cm.MilliCPUToShares(cpuLimit.MilliValue()))
-	} else {
+	} else if cpuRequest != nil {
 		// if cpuRequest.Amount is nil, then MilliCPUToShares will return the minimal number
 		// of CPU shares.
 		cpuShares = int64(cm.MilliCPUToShares(cpuRequest.MilliValue()))
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
index 6a494b20e50..68318b59515 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
@@ -260,6 +260,29 @@ func TestCalculateLinuxResources(t *testing.T) {
 				MemoryLimitInBytes: 0,
 			},
 		},
+		{
+			name:     "RequestNilCPU",
+			cpuLim:   resource.MustParse("2"),
+			memLim:   resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           200000,
+				CpuShares:          2048,
+				MemoryLimitInBytes: 0,
+			},
+		},
+		{
+			name:     "RequestZeroCPU",
+			cpuReq:   resource.MustParse("0"),
+			cpuLim:   resource.MustParse("2"),
+			memLim:   resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           200000,
+				CpuShares:          2,
+				MemoryLimitInBytes: 0,
+			},
+		},
 	}
 	for _, test := range tests {
 		linuxContainerResources := m.calculateLinuxResources(&test.cpuReq, &test.cpuLim, &test.memLim)
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go
index 646ee7f23cc..c2c6b1aaee3 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go
@@ -21,6 +21,7 @@ package kuberuntime
 
 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 )
@@ -41,7 +42,11 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
 
 func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
 	req, lim := resourcehelper.PodRequestsAndLimitsWithoutOverhead(pod)
-	return m.calculateLinuxResources(req.Cpu(), lim.Cpu(), lim.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := req[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = req.Cpu()
+	}
+	return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory())
 }
 
 func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
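For reference, here is a minimal standalone sketch (not part of the patch; the function names and the inlined share math are assumptions that approximate the kubelet's cm.MilliCPUToShares helper) of the behavior the change introduces: a CPU request that was never specified now falls back to the CPU limit when computing cgroup CPU shares, while an explicit request of 0 still maps to the kernel minimum of 2 shares, matching the RequestNilCPU (2048) and RequestZeroCPU (2) expectations in the test above.

```go
package main

import "fmt"

// milliCPUToShares approximates the kubelet's cm.MilliCPUToShares helper:
// 1024 shares per CPU, floored at the kernel minimum of 2 shares.
func milliCPUToShares(milliCPU int64) int64 {
	const (
		sharesPerCPU  = 1024
		milliCPUToCPU = 1000
		minShares     = 2
	)
	if milliCPU == 0 {
		return minShares
	}
	shares := milliCPU * sharesPerCPU / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return shares
}

// cpuShares mirrors the branching added by the patch: a nil request defaults
// to the limit, a non-nil (even zero) request is converted directly, and if
// neither is set the shares stay at their zero value.
func cpuShares(cpuRequestMilli *int64, cpuLimitMilli int64) int64 {
	var shares int64
	if cpuRequestMilli == nil && cpuLimitMilli != 0 {
		shares = milliCPUToShares(cpuLimitMilli)
	} else if cpuRequestMilli != nil {
		shares = milliCPUToShares(*cpuRequestMilli)
	}
	return shares
}

func main() {
	zero := int64(0)
	fmt.Println(cpuShares(nil, 2000))   // request unset, limit 2 CPUs -> 2048
	fmt.Println(cpuShares(&zero, 2000)) // request explicitly 0        -> 2
}
```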