Merge pull request #13155 from derekwaynecarr/enforce_limits
Map cpu limits to cpu-quota in docker run
@@ -50,6 +50,9 @@ const (
 	minShares     = 2
 	sharesPerCPU  = 1024
 	milliCPUToCPU = 1000
+
+	// 100000 is equivalent to 100ms
+	quotaPeriod = 100000
 )
 
 // DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
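(The CFS control files are denominated in microseconds — cpu.cfs_period_us and cpu.cfs_quota_us — so quotaPeriod = 100000 means 100000µs, i.e. the 100ms the comment refers to.)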
@@ -306,6 +309,28 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
 	return client
 }
 
+// milliCPUToQuota converts milliCPU to CFS quota and period values
+func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
+	// CFS quota is measured in two values:
+	//  - cfs_period_us=100ms (the amount of time to measure usage across)
+	//  - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
+	// so in the above example, you are limited to 20% of a single CPU
+	// for multi-cpu environments, you just scale equivalent amounts
+
+	if milliCPU == 0 {
+		// take the default behavior from docker
+		return
+	}
+
+	// we set the period to 100ms by default
+	period = quotaPeriod
+
+	// we then convert your milliCPU to a value normalized over a period
+	quota = (milliCPU * quotaPeriod) / milliCPUToCPU
+
+	return
+}
+
 func milliCPUToShares(milliCPU int64) int64 {
 	if milliCPU == 0 {
 		// Docker converts zero milliCPU to unset, which maps to kernel default
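To see the arithmetic concretely, here is a minimal standalone sketch of the same conversion (the constants and function body mirror the hunk above; the package wrapper and main function are added only for illustration):

package main

import "fmt"

const (
	milliCPUToCPU = 1000   // milliCPU units that make up one CPU core
	quotaPeriod   = 100000 // CFS period in microseconds (100ms)
)

// milliCPUToQuota mirrors the conversion in the hunk above.
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
	if milliCPU == 0 {
		// leave quota/period unset so the docker/kernel defaults apply
		return
	}
	period = quotaPeriod
	quota = (milliCPU * quotaPeriod) / milliCPUToCPU
	return
}

func main() {
	for _, m := range []int64{200, 500, 1000, 1500} {
		q, p := milliCPUToQuota(m)
		// 200m -> 20000/100000 (20% of a CPU); 1500m -> 150000/100000 (1.5 CPUs)
		fmt.Printf("%dm CPU -> quota=%d period=%d\n", m, q, p)
	}
}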
@@ -737,3 +737,43 @@ func TestMakePortsAndBindings(t *testing.T) {
 		}
 	}
 }
+
+func TestMilliCPUToQuota(t *testing.T) {
+	testCases := []struct {
+		input  int64
+		quota  int64
+		period int64
+	}{
+		{
+			input:  int64(0),
+			quota:  int64(0),
+			period: int64(0),
+		},
+		{
+			input:  int64(200),
+			quota:  int64(20000),
+			period: int64(100000),
+		},
+		{
+			input:  int64(500),
+			quota:  int64(50000),
+			period: int64(100000),
+		},
+		{
+			input:  int64(1000),
+			quota:  int64(100000),
+			period: int64(100000),
+		},
+		{
+			input:  int64(1500),
+			quota:  int64(150000),
+			period: int64(100000),
+		},
+	}
+	for _, testCase := range testCases {
+		quota, period := milliCPUToQuota(testCase.input)
+		if quota != testCase.quota || period != testCase.period {
+			t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
+		}
+	}
+}
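The expected values follow directly from the conversion: 200m yields 20000/100000 (20% of one CPU per period), 1000m fills exactly one full period, and 1500m yields 150000/100000 (1.5 CPUs); an input of 0 leaves both values at 0, deferring to Docker's default, unthrottled behavior.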
@@ -46,7 +46,7 @@ func NewFakeDockerManager(
 	fakeProcFs := procfs.NewFakeProcFs()
 	dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, machineInfo, podInfraContainerImage, qps,
 		burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, &NativeExecHandler{},
-		fakeOomAdjuster, fakeProcFs)
+		fakeOomAdjuster, fakeProcFs, false)
 	dm.dockerPuller = &FakeDockerPuller{}
 	dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
 	return dm
@@ -132,6 +132,9 @@ type DockerManager struct {
 
 	// Get information from /proc mount.
 	procFs procfs.ProcFsInterface
+
+	// If true, enforce container cpu limits with CFS quota support
+	cpuCFSQuota bool
 }
 
 func NewDockerManager(
@@ -150,7 +153,8 @@ func NewDockerManager(
 	httpClient kubeletTypes.HttpGetter,
 	execHandler ExecHandler,
 	oomAdjuster *oom.OomAdjuster,
-	procFs procfs.ProcFsInterface) *DockerManager {
+	procFs procfs.ProcFsInterface,
+	cpuCFSQuota bool) *DockerManager {
 	// Work out the location of the Docker runtime, defaulting to /var/lib/docker
 	// if there are any problems.
 	dockerRoot := "/var/lib/docker"
@@ -201,6 +205,7 @@ func NewDockerManager(
 		execHandler: execHandler,
 		oomAdjuster: oomAdjuster,
 		procFs:      procFs,
+		cpuCFSQuota: cpuCFSQuota,
 	}
 	dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
 	dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
@@ -673,6 +678,7 @@ func (dm *DockerManager) runContainer(
 		// of CPU shares.
 		cpuShares = milliCPUToShares(cpuRequest.MilliValue())
 	}
+
 	_, containerName := BuildDockerName(dockerName, container)
 	dockerOpts := docker.CreateContainerOptions{
 		Name: containerName,
@@ -742,6 +748,15 @@ func (dm *DockerManager) runContainer(
 		MemorySwap: -1,
 		CPUShares:  cpuShares,
 	}
+
+	if dm.cpuCFSQuota {
+		// if cpuLimit.Amount is nil, then the appropriate default value is returned to allow full usage of cpu resource.
+		cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
+
+		hc.CPUQuota = cpuQuota
+		hc.CPUPeriod = cpuPeriod
+	}
+
 	if len(opts.DNS) > 0 {
 		hc.DNS = opts.DNS
 	}
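Here hc is the Docker HostConfig being built for the container, so these two fields have the same effect as the --cpu-quota and --cpu-period flags on docker run. Below is a sketch of the wiring, assuming the fsouza/go-dockerclient package that the docker.CreateContainerOptions type above comes from; the helper name and example limit are hypothetical, and the surrounding kubelet plumbing is omitted:

package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

const quotaPeriod = 100000 // CFS period in microseconds (100ms)

// hostConfigForCPULimit is a hypothetical helper showing how a cpu limit
// lands in the HostConfig when CFS quota enforcement is enabled.
func hostConfigForCPULimit(milliCPU int64, cpuCFSQuota bool) *docker.HostConfig {
	hc := &docker.HostConfig{MemorySwap: -1}
	if cpuCFSQuota && milliCPU > 0 {
		hc.CPUPeriod = quotaPeriod                    // like `docker run --cpu-period=100000`
		hc.CPUQuota = (milliCPU * quotaPeriod) / 1000 // like `docker run --cpu-quota=...`
	}
	return hc
}

func main() {
	hc := hostConfigForCPULimit(500, true)
	fmt.Printf("quota=%d period=%d\n", hc.CPUQuota, hc.CPUPeriod) // quota=50000 period=100000
}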
@@ -168,7 +168,8 @@ func NewMainKubelet(
 	podCIDR string,
 	pods int,
 	dockerExecHandler dockertools.ExecHandler,
-	resolverConfig string) (*Kubelet, error) {
+	resolverConfig string,
+	cpuCFSQuota bool) (*Kubelet, error) {
 	if rootDirectory == "" {
 		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
 	}
@@ -285,6 +286,7 @@ func NewMainKubelet(
 		pods:            pods,
 		syncLoopMonitor: util.AtomicValue{},
 		resolverConfig:  resolverConfig,
+		cpuCFSQuota:     cpuCFSQuota,
 	}
 
 	if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
@@ -321,7 +323,8 @@ func NewMainKubelet(
 			klet.httpClient,
 			dockerExecHandler,
 			oomAdjuster,
-			procFs)
+			procFs,
+			klet.cpuCFSQuota)
 	case "rkt":
 		conf := &rkt.Config{
 			Path: rktPath,
@@ -560,6 +563,9 @@ type Kubelet struct {
 
 	// Optionally shape the bandwidth of a pod
 	shaper bandwidth.BandwidthShaper
+
+	// True if container cpu limits should be enforced via cgroup CFS quota
+	cpuCFSQuota bool
 }
 
 // getRootDir returns the full path to the directory under which kubelet can
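Taken together, the new cpuCFSQuota boolean is threaded from NewMainKubelet through NewDockerManager into runContainer: when true, the container's cpu limit is translated into CFS quota/period values alongside the existing cpu-shares mapping; when false, the kubelet keeps the previous shares-only behavior.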