fix bugs of container cpu shares when cpu request set to zero
This commit is contained in:
parent 475f7af1c1
commit 4c87589300
@@ -63,7 +63,11 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	}
 
 	// set linux container resources
-	lc.Resources = m.calculateLinuxResources(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = container.Resources.Requests.Cpu()
+	}
+	lc.Resources = m.calculateLinuxResources(cpuRequest, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
 
 	lc.Resources.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
 		int64(m.machineInfo.MemoryCapacity)))
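The subtlety this hunk works around is that container.Resources.Requests.Cpu() returns a usable *resource.Quantity even when no CPU request is present, so checking the value alone cannot distinguish "request omitted" from "request explicitly 0"; only the map-presence check can. A minimal standalone sketch of that behavior, assuming only k8s.io/api and k8s.io/apimachinery (illustration, not code from this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A container spec with no CPU request at all.
	unset := v1.ResourceRequirements{}
	// A container spec with the CPU request explicitly set to zero.
	zero := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")},
	}

	// Cpu() returns a zero Quantity in both cases, so IsZero() cannot tell
	// "not specified" apart from "explicitly 0".
	fmt.Println(unset.Requests.Cpu().IsZero()) // true
	fmt.Println(zero.Requests.Cpu().IsZero())  // true

	// The pattern used in the commit: check map presence and keep a nil
	// pointer when the request is absent, so only the truly-unset case can
	// later fall back to the CPU limit.
	var cpuRequest *resource.Quantity
	if _, ok := zero.Requests[v1.ResourceCPU]; ok {
		cpuRequest = zero.Requests.Cpu()
	}
	fmt.Println(cpuRequest == nil) // false: the zero request stays an explicit request
}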
@@ -138,9 +142,9 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
 	// If request is not specified, but limit is, we want request to default to limit.
 	// API server does this for new containers, but we repeat this logic in Kubelet
 	// for containers running on existing Kubernetes clusters.
-	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+	if cpuRequest == nil && !cpuLimit.IsZero() {
 		cpuShares = int64(cm.MilliCPUToShares(cpuLimit.MilliValue()))
-	} else {
+	} else if cpuRequest != nil {
 		// if cpuRequest.Amount is nil, then MilliCPUToShares will return the minimal number
 		// of CPU shares.
 		cpuShares = int64(cm.MilliCPUToShares(cpuRequest.MilliValue()))
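For reference, the share values the two branches produce come from the kubelet's milli-CPU-to-shares conversion: roughly 1024 shares per CPU with a floor of 2. A small sketch of that arithmetic, written here for illustration rather than copied from pkg/kubelet/cm:

package main

import "fmt"

// milliCPUToShares sketches the conversion the kubelet's cm.MilliCPUToShares
// performs: about 1024 shares per CPU, floored at the kernel minimum of 2
// (the real helper also applies an upper clamp, omitted here).
func milliCPUToShares(milliCPU int64) int64 {
	const (
		sharesPerCPU  = 1024
		milliCPUToCPU = 1000
		minShares     = 2
	)
	shares := milliCPU * sharesPerCPU / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return shares
}

func main() {
	// Request unset, limit of 2 CPUs: the request defaults to the limit.
	fmt.Println(milliCPUToShares(2000)) // 2048
	// Request explicitly 0: no fallback to the limit, only the floor applies.
	fmt.Println(milliCPUToShares(0)) // 2
}

With a 2-CPU limit and no request, the fallback path yields 2048 shares, while an explicit zero request yields the 2-share floor; these are exactly the values the new test cases below expect.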
@@ -260,6 +260,29 @@ func TestCalculateLinuxResources(t *testing.T) {
 				MemoryLimitInBytes: 0,
 			},
 		},
+		{
+			name:   "RequestNilCPU",
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           200000,
+				CpuShares:          2048,
+				MemoryLimitInBytes: 0,
+			},
+		},
+		{
+			name:   "RequestZeroCPU",
+			cpuReq: resource.MustParse("0"),
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           200000,
+				CpuShares:          2,
+				MemoryLimitInBytes: 0,
+			},
+		},
 	}
 	for _, test := range tests {
 		linuxContainerResources := m.calculateLinuxResources(&test.cpuReq, &test.cpuLim, &test.memLim)
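The two added cases pin down the intended behavior: RequestNilCPU (no CPU request, 2-CPU limit) expects the request to default to the limit, hence 2048 shares, while RequestZeroCPU expects the explicit zero to be honored, leaving only the 2-share floor. They can presumably be exercised with something like go test -run TestCalculateLinuxResources ./pkg/kubelet/kuberuntime/ from the repository root.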
@@ -21,6 +21,7 @@ package kuberuntime
 
 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 )
@@ -41,7 +42,11 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
 
 func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
 	req, lim := resourcehelper.PodRequestsAndLimitsWithoutOverhead(pod)
-	return m.calculateLinuxResources(req.Cpu(), lim.Cpu(), lim.Memory())
+	var cpuRequest *resource.Quantity
+	if _, cpuRequestExists := req[v1.ResourceCPU]; cpuRequestExists {
+		cpuRequest = req.Cpu()
+	}
+	return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory())
 }
 
 func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
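The sandbox path gets the same treatment: the aggregated pod request list only carries a cpu entry if at least one container actually set a CPU request, and passing nil is what lets calculateLinuxResources fall back to the limit. A rough, self-contained sketch of that aggregation and check; the summing helper below is a simplified stand-in for resourcehelper.PodRequestsAndLimitsWithoutOverhead, not the real implementation:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podCPURequests is a simplified stand-in for the pod-level aggregation:
// it sums per-container CPU requests, but only creates a cpu entry when at
// least one container actually specifies one.
func podCPURequests(pod *v1.Pod) v1.ResourceList {
	req := v1.ResourceList{}
	for _, c := range pod.Spec.Containers {
		if q, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
			sum := req[v1.ResourceCPU]
			sum.Add(q)
			req[v1.ResourceCPU] = sum
		}
	}
	return req
}

func main() {
	// Two containers, neither of which sets a CPU request.
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "a"}, {Name: "b"}}}}

	req := podCPURequests(pod)

	// Same pattern as the fixed calculateSandboxResources: only take the
	// aggregated request when the cpu key actually exists.
	var cpuRequest *resource.Quantity
	if _, ok := req[v1.ResourceCPU]; ok {
		cpuRequest = req.Cpu()
	}
	fmt.Println(cpuRequest == nil) // true: the sandbox request stays nil and may fall back to the limit
}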