default memoryThrottlingFactor to 0.9 and optimize the memory.high calculation formulas

Paco Xu
2023-01-28 17:35:29 +08:00
parent 3835c7aecd
commit 7dab6253e1
10 changed files with 18 additions and 15 deletions

View File

@@ -61,7 +61,7 @@ maxOpenFiles: 1000000
maxPods: 110
memoryManagerPolicy: None
memorySwap: {}
-memoryThrottlingFactor: 0.8
+memoryThrottlingFactor: 0.9
nodeLeaseDurationSeconds: 40
nodeStatusMaxImages: 50
nodeStatusReportFrequency: 5m0s

View File

@@ -61,7 +61,7 @@ maxOpenFiles: 1000000
maxPods: 110
memoryManagerPolicy: None
memorySwap: {}
-memoryThrottlingFactor: 0.8
+memoryThrottlingFactor: 0.9
nodeLeaseDurationSeconds: 40
nodeStatusMaxImages: 50
nodeStatusReportFrequency: 5m0s

View File

@@ -440,7 +440,7 @@ type KubeletConfiguration struct {
// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
// while increasing will put less reclaim pressure.
// See https://kep.k8s.io/2570 for more details.
-// Default: 0.8
+// Default: 0.9
// +featureGate=MemoryQoS
// +optional
MemoryThrottlingFactor *float64
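As a worked example of the semantics described in the comment above: with the old default of 0.8 and a 256Mi limit, memory.high sat at 0.8 × 256Mi = 204.8Mi regardless of the container's request; with the new default and the reworked formula in the kuberuntime hunk further below, a 128Mi request yields 128Mi + 0.9 × (256Mi − 128Mi) = 243.2Mi.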

View File

@@ -38,7 +38,7 @@ const (
DefaultVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos
-DefaultMemoryThrottlingFactor = 0.8
+DefaultMemoryThrottlingFactor = 0.9
)
var (

View File

@@ -65,7 +65,7 @@ var (
TopologyManagerPolicy: kubeletconfig.SingleNumaNodeTopologyManagerPolicy,
ShutdownGracePeriod: metav1.Duration{Duration: 30 * time.Second},
ShutdownGracePeriodCriticalPods: metav1.Duration{Duration: 10 * time.Second},
-MemoryThrottlingFactor: utilpointer.Float64(0.8),
+MemoryThrottlingFactor: utilpointer.Float64(0.9),
FeatureGates: map[string]bool{
"CustomCPUCFSQuotaPeriod": true,
"GracefulNodeShutdown": true,

View File

@@ -113,7 +113,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
internalLifecycle: cm.NewFakeInternalContainerLifecycle(),
logReduction: logreduction.NewLogReduction(identicalErrorDelay),
logManager: logManager,
-memoryThrottlingFactor: 0.8,
+memoryThrottlingFactor: 0.9,
}
typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)

View File

@@ -118,12 +118,12 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
// for container level cgroup.
memoryHigh := int64(0)
if memoryLimit != 0 {
-memoryHigh = int64(float64(memoryLimit) * m.memoryThrottlingFactor)
+memoryHigh = int64(float64(memoryRequest) + (float64(memoryLimit)-float64(memoryRequest))*m.memoryThrottlingFactor)
} else {
allocatable := m.getNodeAllocatable()
allocatableMemory, ok := allocatable[v1.ResourceMemory]
if ok && allocatableMemory.Value() > 0 {
-memoryHigh = int64(float64(allocatableMemory.Value()) * m.memoryThrottlingFactor)
+memoryHigh = int64(float64(memoryRequest) + (float64(allocatableMemory.Value())-float64(memoryRequest))*m.memoryThrottlingFactor)
}
}
if memoryHigh > memoryRequest {
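The substance of the change is visible in this hunk: the old formula scaled the hard limit directly, so a request above 90% of the limit would produce a threshold below the request itself (which the guard above then discards); the new formula anchors memory.high at the request and applies the factor only to the headroom between request and limit, or node allocatable memory when no limit is set. A minimal standalone Go sketch of both calculations; the helper and variable names are ours, not the kubelet's:

package main

import "fmt"

// memoryHigh mirrors the new calculation: start from the container's memory
// request and add the throttled share of the headroom up to the ceiling (the
// memory limit, or node allocatable memory when no limit is set).
func memoryHigh(request, ceiling int64, factor float64) int64 {
	return int64(float64(request) + (float64(ceiling)-float64(request))*factor)
}

func main() {
	const mi = int64(1024 * 1024)
	request, limit := 128*mi, 256*mi

	oldHigh := int64(float64(limit) * 0.9)     // old formula: 0.9 * limit, ~230.4Mi
	newHigh := memoryHigh(request, limit, 0.9) // new formula: ~243.2Mi, never below the request

	fmt.Printf("old=%d new=%d request=%d\n", oldHigh, newHigh, request)
}

Running it with a 128Mi request and 256Mi limit gives roughly 230.4Mi under the old formula and 243.2Mi under the new one.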

View File

@@ -307,6 +307,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
podRequestMemory := resource.MustParse("128Mi")
pod1LimitMemory := resource.MustParse("256Mi")
pod1 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
@@ -323,10 +325,10 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
WorkingDir: "testWorkingDir",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("128Mi"),
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("256Mi"),
v1.ResourceMemory: pod1LimitMemory,
},
},
},
@@ -350,7 +352,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
WorkingDir: "testWorkingDir",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("128Mi"),
v1.ResourceMemory: podRequestMemory,
},
},
},
@@ -358,7 +360,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
},
}
memoryNodeAllocatable := resource.MustParse(fakeNodeAllocatableMemory)
-pod2MemoryHigh := float64(memoryNodeAllocatable.Value()) * m.memoryThrottlingFactor
+pod1MemoryHigh := float64(podRequestMemory.Value()) + (float64(pod1LimitMemory.Value())-float64(podRequestMemory.Value()))*m.memoryThrottlingFactor
+pod2MemoryHigh := float64(podRequestMemory.Value()) + (float64(memoryNodeAllocatable.Value())-float64(podRequestMemory.Value()))*m.memoryThrottlingFactor
type expectedResult struct {
containerConfig *runtimeapi.LinuxContainerConfig
@@ -378,7 +381,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
expected: &expectedResult{
l1,
128 * 1024 * 1024,
-int64(float64(256*1024*1024) * m.memoryThrottlingFactor),
+int64(pod1MemoryHigh),
},
},
{
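For reference, the expected values in this test follow directly from the new formula and the fake manager's factor of 0.9: pod1MemoryHigh works out to 128Mi + 0.9 × (256Mi − 128Mi) ≈ 243.2Mi, while pod2, which declares a request but no limit, substitutes the fake node's allocatable memory for the limit in the same expression.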