Add QoS support on node

commit 6ef3de1d5f (parent f53e0ff5a8)
Author: Ananya Kumar
Date: 2015-08-03 17:28:33 -07:00
27 changed files with 916 additions and 71 deletions

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/oom"
)
const (
@@ -229,7 +230,8 @@ func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manag
}
// Also apply oom_score_adj to processes
if err := util.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
oomAdjuster := oom.NewOomAdjuster()
if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
}
}
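
For reference, the OomAdjuster's internals are not shown in this diff; judging from its call sites, it presumably wraps the kernel's per-process oom_score_adj knob. A minimal sketch of that mechanism, assuming only standard /proc semantics (the function name here is hypothetical; the real OomAdjuster also supports retries and cgroup-wide application):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// applyOomScoreAdj sketches the mechanism oom.OomAdjuster presumably wraps:
// writing a value in [-1000, 1000] to the kernel's per-process
// oom_score_adj file. Illustrative only; not the commit's implementation.
func applyOomScoreAdj(pid int, oomScoreAdj int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", oomScoreAdj)), 0644)
}

func main() {
	// Protect the current process from the OOM killer, the way the kubelet
	// protects system daemons (-999 in this commit's qos constants).
	if err := applyOomScoreAdj(os.Getpid(), -999); err != nil {
		fmt.Fprintln(os.Stderr, "failed to set oom_score_adj:", err)
	}
}
```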

View File

@@ -27,6 +27,7 @@ import (
"github.com/docker/docker/pkg/jsonmessage"
docker "github.com/fsouza/go-dockerclient"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
@@ -655,7 +656,7 @@ func TestFindContainersByPod(t *testing.T) {
}
fakeClient := &FakeDockerClient{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorApi.MachineInfo{}, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
for i, test := range tests {
fakeClient.ContainerList = test.containerList
fakeClient.ExitedContainerList = test.exitedContainerList

View File

@@ -17,11 +17,14 @@ limitations under the License.
package dockertools
import (
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/prober"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
)
func NewFakeDockerManager(
@@ -29,6 +32,7 @@ func NewFakeDockerManager(
recorder record.EventRecorder,
readinessManager *kubecontainer.ReadinessManager,
containerRefManager *kubecontainer.RefManager,
machineInfo *cadvisorApi.MachineInfo,
podInfraContainerImage string,
qps float32,
burst int,
@@ -39,8 +43,11 @@ func NewFakeDockerManager(
httpClient kubeletTypes.HttpGetter,
runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks, &NativeExecHandler{})
fakeOomAdjuster := oom.NewFakeOomAdjuster()
fakeProcFs := procfs.NewFakeProcFs()
dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, machineInfo, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks, &NativeExecHandler{},
fakeOomAdjuster, fakeProcFs)
dm.puller = &FakeDockerPuller{}
dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
return dm

View File

@@ -34,6 +34,7 @@ import (
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/golang/groupcache/lru"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/client/record"
@@ -42,20 +43,17 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/prober"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
)
const (
// The oom_score_adj of the POD infrastructure container. The default is 0 for
// any other docker containers, so any value below that makes it *less* likely
// to get OOM killed.
podOomScoreAdj = -100
userContainerOomScoreAdj = 0
maxReasonCacheEntries = 200
kubernetesPodLabel = "io.kubernetes.pod.data"
@@ -75,6 +73,7 @@ type DockerManager struct {
readinessManager *kubecontainer.ReadinessManager
containerRefManager *kubecontainer.RefManager
os kubecontainer.OSInterface
machineInfo *cadvisorApi.MachineInfo
// The image name of the pod infra container.
podInfraContainerImage string
@@ -114,6 +113,12 @@ type DockerManager struct {
// Handler used to execute commands in containers.
execHandler ExecHandler
// Used to set OOM scores of processes.
oomAdjuster *oom.OomAdjuster
// Get information from /proc mount.
procFs procfs.ProcFsInterface
}
func NewDockerManager(
@@ -121,6 +126,7 @@ func NewDockerManager(
recorder record.EventRecorder,
readinessManager *kubecontainer.ReadinessManager,
containerRefManager *kubecontainer.RefManager,
machineInfo *cadvisorApi.MachineInfo,
podInfraContainerImage string,
qps float32,
burst int,
@@ -130,7 +136,9 @@ func NewDockerManager(
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter,
runtimeHooks kubecontainer.RuntimeHooks,
execHandler ExecHandler) *DockerManager {
execHandler ExecHandler,
oomAdjuster *oom.OomAdjuster,
procFs procfs.ProcFsInterface) *DockerManager {
// Work out the location of the Docker runtime, defaulting to /var/lib/docker
// if there are any problems.
dockerRoot := "/var/lib/docker"
@@ -164,11 +172,12 @@ func NewDockerManager(
reasonCache := stringCache{cache: lru.New(maxReasonCacheEntries)}
dm := &DockerManager{
client: client,
recorder: recorder,
readinessManager: readinessManager,
containerRefManager: containerRefManager,
os: osInterface,
client: client,
recorder: recorder,
readinessManager: readinessManager,
containerRefManager: containerRefManager,
os: osInterface,
machineInfo: machineInfo,
podInfraContainerImage: podInfraContainerImage,
reasonCache: reasonCache,
puller: newDockerPuller(client, qps, burst),
@@ -179,6 +188,8 @@ func NewDockerManager(
generator: generator,
runtimeHooks: runtimeHooks,
execHandler: execHandler,
oomAdjuster: oomAdjuster,
procFs: procFs,
}
dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
@@ -1231,32 +1242,42 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
}
// Set the OOM score of the POD container lower than that of the other containers
// in the pod, which have OOM score 0 by default. This ensures that it is
// killed only as a last resort.
// Container information is used in adjusting OOM scores and adding ndots.
containerInfo, err := dm.client.InspectContainer(string(id))
if err != nil {
return "", err
}
// Ensure the PID actually exists; otherwise we could end up adjusting our own process.
if containerInfo.State.Pid == 0 {
return "", fmt.Errorf("failed to get init PID for Docker container %q", string(id))
}
// Set OOM score of the container based on the priority of the container.
// Processes in lower-priority pods should be killed first if the system runs out of memory.
// The main pod infrastructure container is considered high priority, since if it is killed the
// whole pod will die.
var oomScoreAdj int
if container.Name == PodInfraContainerName {
util.ApplyOomScoreAdj(containerInfo.State.Pid, podOomScoreAdj)
// Currently, Docker does not have a flag by which the ndots option can be passed.
// (A separate issue has been filed with Docker to add an ndots flag.)
// The addNDotsOption call appends the ndots option to the resolv.conf file generated by Docker.
// This resolv.conf file is shared by all containers of the same pod, and needs to be modified only once per pod.
// We modify it when the pause container is created, since it is the first container created in the pod
// and it holds the networking namespace.
err = addNDotsOption(containerInfo.ResolvConfPath)
oomScoreAdj = qos.PodInfraOomAdj
} else {
// Child processes of the docker daemon inherit its OOM score. We explicitly
// apply the default OOM score of 0 to user containers so that daemons and POD
// containers are not killed by the OOM killer.
util.ApplyOomScoreAdj(containerInfo.State.Pid, userContainerOomScoreAdj)
oomScoreAdj = qos.GetContainerOomScoreAdjust(container, dm.machineInfo.MemoryCapacity)
}
cgroupName, err := dm.procFs.GetFullContainerName(containerInfo.State.Pid)
if err != nil {
return "", err
}
if err = dm.oomAdjuster.ApplyOomScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil {
return "", err
}
// Currently, Docker does not have a flag by which the ndots option can be passed.
// (A separate issue has been filed with Docker to add an ndots flag.)
// The addNDotsOption call appends the ndots option to the resolv.conf file generated by Docker.
// This resolv.conf file is shared by all containers of the same pod, and needs to be modified only once per pod.
// We modify it when the pause container is created, since it is the first container created in the pod
// and it holds the networking namespace.
if container.Name == PodInfraContainerName {
err = addNDotsOption(containerInfo.ResolvConfPath)
}
return kubeletTypes.DockerID(id), err
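
The addNDotsOption implementation referenced above is not part of this hunk. A hedged sketch, assuming it simply appends an "options ndots:N" line to Docker's generated resolv.conf (the function name and the value 5 are assumptions for illustration):

```go
package main

import (
	"fmt"
	"os"
)

// appendNDotsOption sketches what addNDotsOption plausibly does: append an
// "options ndots:N" line to the resolv.conf Docker generated for the pod's
// network namespace. The actual value of N is not visible in this hunk.
func appendNDotsOption(resolvConfPath string) error {
	f, err := os.OpenFile(resolvConfPath, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString("options ndots:5\n")
	return err
}

func main() {
	if err := appendNDotsOption("/etc/resolv.conf"); err != nil {
		fmt.Println("could not append ndots option:", err)
	}
}
```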

View File

@@ -31,6 +31,7 @@ import (
"time"
docker "github.com/fsouza/go-dockerclient"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/record"
@@ -114,6 +115,7 @@ func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManage
fakeRecorder,
readinessManager,
containerRefManager,
&cadvisorApi.MachineInfo{},
PodInfraContainerImage,
0, 0, "",
kubecontainer.FakeOS{},

View File

@@ -34,6 +34,7 @@ import (
"time"
"github.com/golang/glog"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
@@ -60,13 +61,13 @@ import (
utilErrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/mount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/third_party/golang/expansion"
cadvisorApi "github.com/google/cadvisor/info/v1"
)
const (
@@ -274,6 +275,14 @@ func NewMainKubelet(
klet.networkPlugin = plug
}
machineInfo, err := klet.GetCachedMachineInfo()
if err != nil {
return nil, err
}
oomAdjuster := oom.NewOomAdjuster()
procFs := procfs.NewProcFs()
// Initialize the runtime.
switch containerRuntime {
case "docker":
@@ -283,6 +292,7 @@ func NewMainKubelet(
recorder,
readinessManager,
containerRefManager,
machineInfo,
podInfraContainerImage,
pullQPS,
pullBurst,
@@ -292,7 +302,9 @@ func NewMainKubelet(
klet,
klet.httpClient,
newKubeletRuntimeHooks(recorder),
dockerExecHandler)
dockerExecHandler,
oomAdjuster,
procFs)
case "rkt":
conf := &rkt.Config{InsecureSkipVerify: true}
rktRuntime, err := rkt.New(

View File

@@ -24,6 +24,7 @@ import (
"time"
docker "github.com/fsouza/go-dockerclient"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -44,7 +45,7 @@ func newPod(uid, name string) *api.Pod {
func createFakeRuntimeCache(fakeRecorder *record.FakeRecorder) kubecontainer.RuntimeCache {
fakeDocker := &dockertools.FakeDockerClient{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, &cadvisorApi.MachineInfo{}, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
return kubecontainer.NewFakeRuntimeCache(dockerManager)
}
@@ -224,7 +225,7 @@ func TestFakePodWorkers(t *testing.T) {
fakeDocker := &dockertools.FakeDockerClient{}
fakeRecorder := &record.FakeRecorder{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, &cadvisorApi.MachineInfo{}, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
fakeRuntimeCache := kubecontainer.NewFakeRuntimeCache(dockerManager)
kubeletForRealWorkers := &simpleFakeKubelet{}

pkg/kubelet/qos/doc.go (new file, 25 lines)
View File

@@ -0,0 +1,25 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package qos contains helper functions for quality of service.
// For each resource (memory, CPU), the Kubelet supports three classes of containers.
// Memory guaranteed containers will receive the highest priority and will get all the
// resources they need.
// Burstable containers will be guaranteed their request and can “burst” and use more
// resources when available.
// Best-Effort containers, which don't specify a request, can use resources only if they
// are not being used by other pods.
package qos
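
To make the three classes concrete, here is a brief sketch, using the same api and resource packages as this commit's tests, of one container spec per memory QoS class (the package and variable names are illustrative):

```go
package qosexample

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

var (
	// Guaranteed: memory request == memory limit, both non-zero.
	guaranteed = api.Container{
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("1G")},
			Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("1G")},
		},
	}

	// Burstable: a non-zero memory request below the limit.
	burstable = api.Container{
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("500M")},
			Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("1G")},
		},
	}

	// Best-Effort: no memory request at all.
	bestEffort = api.Container{}
)
```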

View File

@@ -0,0 +1,75 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"k8s.io/kubernetes/pkg/api"
)
const (
PodInfraOomAdj int = -999
KubeletOomScoreAdj int = -999
KubeProxyOomScoreAdj int = -999
)
// isMemoryBestEffort returns true if the container's memory requirements are best-effort.
func isMemoryBestEffort(container *api.Container) bool {
// A container is memory best-effort if its memory request is unspecified or 0.
// If a request is specified, then the user expects some kind of resource guarantee.
return container.Resources.Requests.Memory().Value() == 0
}
// isMemoryGuaranteed returns true if the container's memory requirements are Guaranteed.
func isMemoryGuaranteed(container *api.Container) bool {
// A container is memory guaranteed if its memory request == memory limit.
// If memory request == memory limit, the user is very confident of resource consumption.
memoryRequestValue := container.Resources.Requests.Memory().Value()
memoryLimitValue := container.Resources.Limits.Memory().Value()
return memoryRequestValue == memoryLimitValue && memoryRequestValue != 0
}
// GetContainerOomScoreAdjust returns the amount by which the OOM score of all processes in the
// container should be adjusted. The OOM score of a process is the percentage of memory it consumes
// multiplied by 10 (barring exceptional cases) plus a configurable quantity between -1000
// and 1000. Containers with higher OOM scores are killed first if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
func GetContainerOomScoreAdjust(container *api.Container, memoryCapacity int64) int {
if isMemoryGuaranteed(container) {
// Memory guaranteed containers should be the last to get killed.
return -999
} else if isMemoryBestEffort(container) {
// Memory best-effort containers should be the first to be killed.
return 1000
} else {
// Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
// we want to protect Burstable containers that consume less memory than requested.
// The formula below is a heuristic. A container requesting 10% of a system's
// memory will have an OOM score adjust of 900. If a process in that container
// then uses over 10% of the system's memory, its OOM score will be 1000. The idea is
// that containers which use more than their request will have an OOM score of 1000
// and will be prime targets for OOM kills.
// Note that this is a heuristic; it won't work well if a container has many small processes.
memoryRequest := container.Resources.Requests.Memory().Value()
oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
// A memory guaranteed container using 100% of memory can have an OOM score of 1. Ensure
// that memory burstable containers have a higher OOM score.
if oomScoreAdjust < 2 {
return 2
}
return int(oomScoreAdjust)
}
}
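
A hedged usage sketch of the new function, assuming the k8s.io/kubernetes/pkg/kubelet/qos import path this commit introduces: a burstable container requesting 4G on an 8G (decimal) node lands at 1000 - (1000*4e9)/8e9 = 500:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

func main() {
	// A burstable container requesting half of an 8 GB (decimal) node.
	container := api.Container{
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("4G")},
		},
	}
	const memoryCapacity = int64(8000000000)
	// 1000 - (1000*4e9)/8e9 = 500
	fmt.Println(qos.GetContainerOomScoreAdjust(&container, memoryCapacity))
}
```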

View File

@@ -0,0 +1,187 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"strconv"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
)
const (
standardMemoryAmount = 8000000000
)
var (
zeroRequestMemoryBestEffort = api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("5m"),
api.ResourceName(api.ResourceMemory): resource.MustParse("0G"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("5m"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
},
}
edgeMemoryBestEffort = api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse("0G"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse("0G"),
},
},
}
noRequestMemoryBestEffort = api.Container{
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
},
}
noLimitMemoryBestEffort = api.Container{}
memoryGuaranteed = api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("5m"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
},
}
memoryBurstable = api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount / 2)),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
},
}
memoryBurstableNoLimit = api.Container{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
},
},
}
)
func TestIsMemoryBestEffort(t *testing.T) {
validCases := []api.Container{zeroRequestMemoryBestEffort, noRequestMemoryBestEffort, noLimitMemoryBestEffort, edgeMemoryBestEffort}
for _, container := range validCases {
if !isMemoryBestEffort(&container) {
t.Errorf("container %+v is memory best-effort", container)
}
}
invalidCases := []api.Container{memoryGuaranteed, memoryBurstable}
for _, container := range invalidCases {
if isMemoryBestEffort(&container) {
t.Errorf("container %+v is not memory best-effort", container)
}
}
}
func TestIsMemoryGuaranteed(t *testing.T) {
validCases := []api.Container{memoryGuaranteed}
for _, container := range validCases {
if !isMemoryGuaranteed(&container) {
t.Errorf("container %+v is memory guaranteed", container)
}
}
invalidCases := []api.Container{zeroRequestMemoryBestEffort, noRequestMemoryBestEffort, noLimitMemoryBestEffort, edgeMemoryBestEffort, memoryBurstable}
for _, container := range invalidCases {
if isMemoryGuaranteed(&container) {
t.Errorf("container %+v is not memory guaranteed", container)
}
}
}
type oomTest struct {
container *api.Container
memoryCapacity int64
lowOomScoreAdj int // The minimum oom_score_adj the container should be assigned.
highOomScoreAdj int // The maximum oom_score_adj the container should be assigned.
}
func TestGetContainerOomScoreAdjust(t *testing.T) {
oomTests := []oomTest{
{
container: &zeroRequestMemoryBestEffort,
memoryCapacity: 4000000000,
lowOomScoreAdj: 1000,
highOomScoreAdj: 1000,
},
{
container: &edgeMemoryBestEffort,
memoryCapacity: 8000000000,
lowOomScoreAdj: 1000,
highOomScoreAdj: 1000,
},
{
container: &noRequestMemoryBestEffort,
memoryCapacity: 7230457451,
lowOomScoreAdj: 1000,
highOomScoreAdj: 1000,
},
{
container: &noLimitMemoryBestEffort,
memoryCapacity: 4000000000,
lowOomScoreAdj: 1000,
highOomScoreAdj: 1000,
},
{
container: &memoryGuaranteed,
memoryCapacity: 123456789,
lowOomScoreAdj: -999,
highOomScoreAdj: -999,
},
{
container: &memoryBurstable,
memoryCapacity: standardMemoryAmount,
lowOomScoreAdj: 495,
highOomScoreAdj: 505,
},
{
container: &memoryBurstableNoLimit,
memoryCapacity: standardMemoryAmount,
lowOomScoreAdj: 2,
highOomScoreAdj: 2,
},
}
for _, test := range oomTests {
oomScoreAdj := GetContainerOomScoreAdjust(test.container, test.memoryCapacity)
if oomScoreAdj < test.lowOomScoreAdj || oomScoreAdj > test.highOomScoreAdj {
t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
}
}
}
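
The burstable expectations above follow directly from the formula; a quick stand-alone check of the integer arithmetic:

```go
package main

import "fmt"

func main() {
	// memoryBurstable requests standardMemoryAmount/2 = 4e9 on an 8e9 node:
	// 1000 - (1000*4e9)/8e9 = 500, inside the [495, 505] bounds above.
	const standardMemoryAmount = int64(8000000000)
	fmt.Println(1000 - (1000*(standardMemoryAmount/2))/standardMemoryAmount) // 500

	// memoryBurstableNoLimit requests 8e9-1, so integer division gives
	// 1000 - 999 = 1, which the clamp in GetContainerOomScoreAdjust raises to 2.
	adj := 1000 - (1000*(standardMemoryAmount-1))/standardMemoryAmount
	if adj < 2 {
		adj = 2
	}
	fmt.Println(adj) // 2
}
```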

View File

@@ -154,6 +154,7 @@ func TestRunOnce(t *testing.T) {
kb.recorder,
kb.readinessManager,
kb.containerRefManager,
&cadvisorApi.MachineInfo{},
dockertools.PodInfraContainerImage,
0,
0,