fix: rename

sivchari 2022-08-26 00:44:31 +09:00
parent 5494b30ce5
commit 12d49b6bfb
4 changed files with 23 additions and 23 deletions

View File

@@ -307,8 +307,8 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
     return nil
 }
 
-// getCpuWeight converts from the range [2, 262144] to [1, 10000]
-func getCpuWeight(cpuShares *uint64) uint64 {
+// getCPUWeight converts from the range [2, 262144] to [1, 10000]
+func getCPUWeight(cpuShares *uint64) uint64 {
     if cpuShares == nil {
         return 0
     }
@@ -360,18 +360,18 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
     if resourceConfig.Memory != nil {
         resources.Memory = *resourceConfig.Memory
     }
-    if resourceConfig.CpuShares != nil {
+    if resourceConfig.CPUShares != nil {
         if libcontainercgroups.IsCgroup2UnifiedMode() {
-            resources.CpuWeight = getCpuWeight(resourceConfig.CpuShares)
+            resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares)
         } else {
-            resources.CpuShares = *resourceConfig.CpuShares
+            resources.CpuShares = *resourceConfig.CPUShares
         }
     }
-    if resourceConfig.CpuQuota != nil {
-        resources.CpuQuota = *resourceConfig.CpuQuota
+    if resourceConfig.CPUQuota != nil {
+        resources.CpuQuota = *resourceConfig.CPUQuota
     }
-    if resourceConfig.CpuPeriod != nil {
-        resources.CpuPeriod = *resourceConfig.CpuPeriod
+    if resourceConfig.CPUPeriod != nil {
+        resources.CpuPeriod = *resourceConfig.CPUPeriod
     }
     if resourceConfig.PidsLimit != nil {
         resources.PidsLimit = *resourceConfig.PidsLimit
@@ -531,7 +531,7 @@ func (m *cgroupManagerImpl) ReduceCPULimits(cgroupName CgroupName) error {
     // Set lowest possible CpuShares value for the cgroup
     minimumCPUShares := uint64(MinShares)
     resources := &ResourceConfig{
-        CpuShares: &minimumCPUShares,
+        CPUShares: &minimumCPUShares,
     }
     containerConfig := &CgroupConfig{
         Name: cgroupName,
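Note: the first hunk above renames getCpuWeight to getCPUWeight but shows only the top of its body. The function's comment describes a linear mapping from cgroup v1 cpu.shares ([2, 262144]) onto cgroup v2 cpu.weight ([1, 10000]); the snippet below is a minimal sketch of such a mapping, assuming the usual clamping at the upper bound. It is not text from this commit.

// Sketch only: one way to map cpu.shares in [2, 262144] onto cpu.weight in
// [1, 10000], matching the renamed function's comment. The real body is
// outside the hunk shown above.
func getCPUWeight(cpuShares *uint64) uint64 {
    if cpuShares == nil {
        return 0
    }
    if *cpuShares >= 262144 {
        return 10000
    }
    // Linear interpolation between the two ranges.
    return 1 + ((*cpuShares-2)*9999)/262142
}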

View File

@@ -182,22 +182,22 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
     // build the result
     result := &ResourceConfig{}
     if qosClass == v1.PodQOSGuaranteed {
-        result.CpuShares = &cpuShares
-        result.CpuQuota = &cpuQuota
-        result.CpuPeriod = &cpuPeriod
+        result.CPUShares = &cpuShares
+        result.CPUQuota = &cpuQuota
+        result.CPUPeriod = &cpuPeriod
         result.Memory = &memoryLimits
     } else if qosClass == v1.PodQOSBurstable {
-        result.CpuShares = &cpuShares
+        result.CPUShares = &cpuShares
         if cpuLimitsDeclared {
-            result.CpuQuota = &cpuQuota
-            result.CpuPeriod = &cpuPeriod
+            result.CPUQuota = &cpuQuota
+            result.CPUPeriod = &cpuPeriod
         }
         if memoryLimitsDeclared {
             result.Memory = &memoryLimits
         }
     } else {
         shares := uint64(MinShares)
-        result.CpuShares = &shares
+        result.CPUShares = &shares
     }
     result.HugePageLimit = hugePageLimits
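These hunks rename fields of ResourceConfig, but the struct definition itself (in the package's types file) is not part of the shown diff. Purely for orientation, after the rename it would presumably read roughly as sketched below; the field types are inferred from how the fields are dereferenced in the hunks above, not copied from the commit.

// Assumed shape of ResourceConfig after the rename; not shown in this diff.
// Pointer fields let callers distinguish "unset" from an explicit zero.
type ResourceConfig struct {
    Memory        *int64          // memory limit in bytes
    CPUShares     *uint64         // formerly CpuShares
    CPUQuota      *int64          // formerly CpuQuota
    CPUPeriod     *uint64         // formerly CpuPeriod
    HugePageLimit map[int64]int64 // hugepage size in bytes -> limit
    PidsLimit     *int64          // pids.max
}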

View File

@@ -40,7 +40,7 @@ const (
     defaultNodeAllocatableCgroupName = "kubepods"
 )
 
-//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
+// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
 func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
     nodeAllocatable := cm.internalCapacity
     // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
@@ -155,7 +155,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
         Name:               cName,
         ResourceParameters: rp,
     }
-    klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CpuShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
+    klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
     if err := cgroupManager.Validate(cgroupConfig.Name); err != nil {
         return err
     }
@@ -180,7 +180,7 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig {
     if q, exists := rl[v1.ResourceCPU]; exists {
         // CPU is defined in milli-cores.
         val := MilliCPUToShares(q.MilliValue())
-        rc.CpuShares = &val
+        rc.CPUShares = &val
     }
     if q, exists := rl[pidlimit.PIDs]; exists {
         val := q.Value()
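getCgroupConfig fills the renamed CPUShares field from MilliCPUToShares, whose body is also outside this diff. For context, that conversion is the standard one in which 1000 milli-CPU (one core) corresponds to 1024 shares, clamped to the kernel's valid range; the sketch below is an assumption about that helper, not part of the commit.

// Sketch of the milli-CPU -> cpu.shares conversion assumed above; constants
// mirror MinShares/MaxShares but are lowercased to mark this as illustrative.
const (
    minShares     = 2
    maxShares     = 262144
    sharesPerCPU  = 1024
    milliCPUToCPU = 1000
)

func milliCPUToShares(milliCPU int64) uint64 {
    if milliCPU == 0 {
        // No request: fall back to the minimum allowed shares.
        return minShares
    }
    shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
    if shares < minShares {
        return minShares
    }
    if shares > maxShares {
        return maxShares
    }
    return uint64(shares)
}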

View File

@@ -98,7 +98,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
         // the BestEffort QoS class has a statically configured minShares value
         if qosClass == v1.PodQOSBestEffort {
             minShares := uint64(MinShares)
-            resourceParameters.CpuShares = &minShares
+            resourceParameters.CPUShares = &minShares
         }
 
         // containerConfig object stores the cgroup specifications
@@ -184,11 +184,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
     // make sure best effort is always 2 shares
     bestEffortCPUShares := uint64(MinShares)
-    configs[v1.PodQOSBestEffort].ResourceParameters.CpuShares = &bestEffortCPUShares
+    configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares
 
     // set burstable shares based on current observe state
     burstableCPUShares := MilliCPUToShares(burstablePodCPURequest)
-    configs[v1.PodQOSBurstable].ResourceParameters.CpuShares = &burstableCPUShares
+    configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares
 
     return nil
 }
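Taken together, the renamed CPUShares/CPUQuota/CPUPeriod fields carry pod CPU requests through to cgroup values on both cgroup versions. The self-contained snippet below is illustrative arithmetic only (it hardcodes the conversions sketched earlier rather than calling kubelet code) and shows what a Burstable pod requesting 500m CPU would end up with.

package main

import "fmt"

func main() {
    // Illustrative only: constants mirror MinShares=2, SharesPerCPU=1024 and the
    // [2, 262144] -> [1, 10000] weight mapping; none of this is kubelet code.
    milliCPU := int64(500) // a Burstable pod requesting 500m CPU

    shares := uint64(milliCPU * 1024 / 1000) // 512 -> CPUShares, i.e. cpu.shares on cgroup v1
    weight := 1 + (shares-2)*9999/262142     // 20  -> cpu.weight on cgroup v2

    fmt.Println(shares, weight) // 512 20
}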