From 5494b30ce5ac3a161d1a52abbcfa9dd349fedfad Mon Sep 17 00:00:00 2001
From: sivchari
Date: Sun, 24 Jul 2022 19:04:08 +0900
Subject: [PATCH 1/3] feat: improve naming

Rename identifiers to follow the Go naming convention for initialisms
(e.g. Api -> API, CpuShares -> CPUShares, devId -> devID).
---
 pkg/kubelet/cm/devicemanager/endpoint.go              | 2 +-
 pkg/kubelet/cm/devicemanager/manager.go               | 2 +-
 pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go | 4 ++--
 pkg/kubelet/cm/devicemanager/pod_devices.go           | 6 +++---
 pkg/kubelet/cm/devicemanager/pod_devices_test.go      | 4 ++--
 pkg/kubelet/cm/types.go                               | 6 +++---
 pkg/kubelet/preemption/preemption_test.go             | 4 ++--
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/pkg/kubelet/cm/devicemanager/endpoint.go b/pkg/kubelet/cm/devicemanager/endpoint.go
index 8ae98d3ffca..a9f7e7bed90 100644
--- a/pkg/kubelet/cm/devicemanager/endpoint.go
+++ b/pkg/kubelet/cm/devicemanager/endpoint.go
@@ -50,7 +50,7 @@ type endpointImpl struct {
 // This is to be used during normal device plugin registration.
 func newEndpointImpl(p plugin.DevicePlugin) *endpointImpl {
 	return &endpointImpl{
-		api:          p.Api(),
+		api:          p.API(),
 		resourceName: p.Resource(),
 	}
 }
diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go
index 7b0283d93e4..13459af0ff0 100644
--- a/pkg/kubelet/cm/devicemanager/manager.go
+++ b/pkg/kubelet/cm/devicemanager/manager.go
@@ -203,7 +203,7 @@ func (m *ManagerImpl) CleanupPluginDirectory(dir string) error {
 }
 
 func (m *ManagerImpl) PluginConnected(resourceName string, p plugin.DevicePlugin) error {
-	options, err := p.Api().GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
+	options, err := p.API().GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
 	if err != nil {
 		return fmt.Errorf("failed to get device plugin options: %v", err)
 	}
diff --git a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go
index 13b1249009b..03c946c0025 100644
--- a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go
+++ b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go
@@ -30,7 +30,7 @@ import (
 )
 
 type DevicePlugin interface {
-	Api() api.DevicePluginClient
+	API() api.DevicePluginClient
 	Resource() string
 	SocketPath() string
 }
@@ -104,7 +104,7 @@ func (c *client) Resource() string {
 	return c.resource
 }
 
-func (c *client) Api() api.DevicePluginClient {
+func (c *client) API() api.DevicePluginClient {
 	return c.client
 }
 
diff --git a/pkg/kubelet/cm/devicemanager/pod_devices.go b/pkg/kubelet/cm/devicemanager/pod_devices.go
index 4e53c6c3a82..8d359dfe9f6 100644
--- a/pkg/kubelet/cm/devicemanager/pod_devices.go
+++ b/pkg/kubelet/cm/devicemanager/pod_devices.go
@@ -343,11 +343,11 @@ func (pdev *podDevices) getContainerDevices(podUID, contName string) ResourceDev
 	}
 	devicePluginMap := make(map[string]pluginapi.Device)
 	for numaid, devlist := range allocateInfo.deviceIds {
-		for _, devId := range devlist {
+		for _, devID := range devlist {
 			var topology *pluginapi.TopologyInfo
 			if numaid != nodeWithoutTopology {
 				NUMANodes := []*pluginapi.NUMANode{{ID: numaid}}
-				if pDev, ok := devicePluginMap[devId]; ok && pDev.Topology != nil {
+				if pDev, ok := devicePluginMap[devID]; ok && pDev.Topology != nil {
 					if nodes := pDev.Topology.GetNodes(); nodes != nil {
 						NUMANodes = append(NUMANodes, nodes...)
 					}
 				}
 				// ID and Healthy are not relevant here.
 				topology = &pluginapi.TopologyInfo{Nodes: NUMANodes}
 			}
-			devicePluginMap[devId] = pluginapi.Device{
+			devicePluginMap[devID] = pluginapi.Device{
 				Topology: topology,
 			}
 		}
diff --git a/pkg/kubelet/cm/devicemanager/pod_devices_test.go b/pkg/kubelet/cm/devicemanager/pod_devices_test.go
index 37a1e3a7dfb..f21fee36816 100644
--- a/pkg/kubelet/cm/devicemanager/pod_devices_test.go
+++ b/pkg/kubelet/cm/devicemanager/pod_devices_test.go
@@ -42,14 +42,14 @@ func TestGetContainerDevices(t *testing.T) {
 	contDevices, ok := resContDevices[resourceName1]
 	require.True(t, ok, "resource %q not present", resourceName1)
 
-	for devId, plugInfo := range contDevices {
+	for devID, plugInfo := range contDevices {
 		nodes := plugInfo.GetTopology().GetNodes()
 		require.Equal(t, len(nodes), len(devices), "Incorrect container devices: %v - %v (nodes %v)", devices, contDevices, nodes)
 
 		for _, node := range plugInfo.GetTopology().GetNodes() {
 			dev, ok := devices[node.ID]
 			require.True(t, ok, "NUMA id %v doesn't exist in result", node.ID)
-			require.Equal(t, devId, dev[0], "Can't find device %s in result", dev[0])
+			require.Equal(t, devID, dev[0], "Can't find device %s in result", dev[0])
 		}
 	}
 }
diff --git a/pkg/kubelet/cm/types.go b/pkg/kubelet/cm/types.go
index edf97334781..f9ddc55ae2c 100644
--- a/pkg/kubelet/cm/types.go
+++ b/pkg/kubelet/cm/types.go
@@ -26,11 +26,11 @@ type ResourceConfig struct {
 	// Memory limit (in bytes).
 	Memory *int64
 	// CPU shares (relative weight vs. other containers).
-	CpuShares *uint64
+	CPUShares *uint64
 	// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
-	CpuQuota *int64
+	CPUQuota *int64
 	// CPU quota period.
-	CpuPeriod *uint64
+	CPUPeriod *uint64
 	// HugePageLimit map from page size (in bytes) to limit (in bytes)
 	HugePageLimit map[int64]int64
 	// Maximum number of pids
diff --git a/pkg/kubelet/preemption/preemption_test.go b/pkg/kubelet/preemption/preemption_test.go
index f643dd4a936..96a2158dc0a 100644
--- a/pkg/kubelet/preemption/preemption_test.go
+++ b/pkg/kubelet/preemption/preemption_test.go
@@ -495,7 +495,7 @@ func parseCPUToInt64(res string) int64 {
 	return (&r).MilliValue()
 }
 
-func parseNonCpuResourceToInt64(res string) int64 {
+func parseNonCPUResourceToInt64(res string) int64 {
 	r := resource.MustParse(res)
 	return (&r).Value()
 }
@@ -511,7 +511,7 @@ func getAdmissionRequirementList(cpu, memory, pods int) admissionRequirementList
 	if memory > 0 {
 		reqs = append(reqs, &admissionRequirement{
 			resourceName: v1.ResourceMemory,
-			quantity:     parseNonCpuResourceToInt64(fmt.Sprintf("%dMi", memory)),
+			quantity:     parseNonCPUResourceToInt64(fmt.Sprintf("%dMi", memory)),
 		})
 	}
 	if pods > 0 {

From 12d49b6bfbd0f3746b4a215cc8fc43548bc07a40 Mon Sep 17 00:00:00 2001
From: sivchari
Date: Fri, 26 Aug 2022 00:44:31 +0900
Subject: [PATCH 2/3] fix: rename

---
 pkg/kubelet/cm/cgroup_manager_linux.go        | 20 +++++++++----------
 pkg/kubelet/cm/helpers_linux.go               | 14 ++++++-------
 .../cm/node_container_manager_linux.go        |  6 +++---
 pkg/kubelet/cm/qos_container_manager_linux.go |  6 +++---
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go
index fa36d6bc1d7..ef5560f4b3d 100644
--- a/pkg/kubelet/cm/cgroup_manager_linux.go
+++ b/pkg/kubelet/cm/cgroup_manager_linux.go
@@ -307,8 +307,8 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
 	return nil
 }
 
-// getCpuWeight converts from the range [2, 262144] to [1, 10000]
-func getCpuWeight(cpuShares *uint64) uint64 {
+// getCPUWeight converts from the range [2, 262144] to [1, 10000]
+func getCPUWeight(cpuShares *uint64) uint64 {
 	if cpuShares == nil {
 		return 0
 	}
@@ -360,18 +360,18 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
 	if resourceConfig.Memory != nil {
 		resources.Memory = *resourceConfig.Memory
 	}
-	if resourceConfig.CpuShares != nil {
+	if resourceConfig.CPUShares != nil {
 		if libcontainercgroups.IsCgroup2UnifiedMode() {
-			resources.CpuWeight = getCpuWeight(resourceConfig.CpuShares)
+			resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares)
 		} else {
-			resources.CpuShares = *resourceConfig.CpuShares
+			resources.CpuShares = *resourceConfig.CPUShares
 		}
 	}
-	if resourceConfig.CpuQuota != nil {
-		resources.CpuQuota = *resourceConfig.CpuQuota
+	if resourceConfig.CPUQuota != nil {
+		resources.CpuQuota = *resourceConfig.CPUQuota
 	}
-	if resourceConfig.CpuPeriod != nil {
-		resources.CpuPeriod = *resourceConfig.CpuPeriod
+	if resourceConfig.CPUPeriod != nil {
+		resources.CpuPeriod = *resourceConfig.CPUPeriod
 	}
 	if resourceConfig.PidsLimit != nil {
 		resources.PidsLimit = *resourceConfig.PidsLimit
@@ -531,7 +531,7 @@ func (m *cgroupManagerImpl) ReduceCPULimits(cgroupName CgroupName) error {
 	// Set lowest possible CpuShares value for the cgroup
 	minimumCPUShares := uint64(MinShares)
 	resources := &ResourceConfig{
-		CpuShares: &minimumCPUShares,
+		CPUShares: &minimumCPUShares,
 	}
 	containerConfig := &CgroupConfig{
 		Name: cgroupName,
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 25ff3f13b82..96546f47be0 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -182,22 +182,22 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	// build the result
 	result := &ResourceConfig{}
 	if qosClass == v1.PodQOSGuaranteed {
-		result.CpuShares = &cpuShares
-		result.CpuQuota = &cpuQuota
-		result.CpuPeriod = &cpuPeriod
+		result.CPUShares = &cpuShares
+		result.CPUQuota = &cpuQuota
+		result.CPUPeriod = &cpuPeriod
 		result.Memory = &memoryLimits
 	} else if qosClass == v1.PodQOSBurstable {
-		result.CpuShares = &cpuShares
+		result.CPUShares = &cpuShares
 		if cpuLimitsDeclared {
-			result.CpuQuota = &cpuQuota
-			result.CpuPeriod = &cpuPeriod
+			result.CPUQuota = &cpuQuota
+			result.CPUPeriod = &cpuPeriod
 		}
 		if memoryLimitsDeclared {
 			result.Memory = &memoryLimits
 		}
 	} else {
 		shares := uint64(MinShares)
-		result.CpuShares = &shares
+		result.CPUShares = &shares
 	}
 	result.HugePageLimit = hugePageLimits
diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go
index 4b35d3c4fe3..74221c67047 100644
--- a/pkg/kubelet/cm/node_container_manager_linux.go
+++ b/pkg/kubelet/cm/node_container_manager_linux.go
@@ -40,7 +40,7 @@ const (
 	defaultNodeAllocatableCgroupName = "kubepods"
 )
 
-//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
+// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
 func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
 	nodeAllocatable := cm.internalCapacity
 	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
@@ -155,7 +155,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
 		Name:               cName,
 		ResourceParameters: rp,
 	}
-	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CpuShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
+	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
 	if err := cgroupManager.Validate(cgroupConfig.Name); err != nil {
 		return err
 	}
@@ -180,7 +180,7 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig {
 	if q, exists := rl[v1.ResourceCPU]; exists {
 		// CPU is defined in milli-cores.
 		val := MilliCPUToShares(q.MilliValue())
-		rc.CpuShares = &val
+		rc.CPUShares = &val
 	}
 	if q, exists := rl[pidlimit.PIDs]; exists {
 		val := q.Value()
diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go
index 0ddd44ac234..1525087a33e 100644
--- a/pkg/kubelet/cm/qos_container_manager_linux.go
+++ b/pkg/kubelet/cm/qos_container_manager_linux.go
@@ -98,7 +98,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 		// the BestEffort QoS class has a statically configured minShares value
 		if qosClass == v1.PodQOSBestEffort {
 			minShares := uint64(MinShares)
-			resourceParameters.CpuShares = &minShares
+			resourceParameters.CPUShares = &minShares
 		}
 
 		// containerConfig object stores the cgroup specifications
@@ -184,11 +184,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
 
 	// make sure best effort is always 2 shares
 	bestEffortCPUShares := uint64(MinShares)
-	configs[v1.PodQOSBestEffort].ResourceParameters.CpuShares = &bestEffortCPUShares
+	configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares
 
 	// set burstable shares based on current observe state
 	burstableCPUShares := MilliCPUToShares(burstablePodCPURequest)
-	configs[v1.PodQOSBurstable].ResourceParameters.CpuShares = &burstableCPUShares
+	configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares
 
 	return nil
 }

From c62a7cdb32261eab2fc7fcd676f6674a07eb688f Mon Sep 17 00:00:00 2001
From: sivchari
Date: Fri, 26 Aug 2022 01:05:02 +0900
Subject: [PATCH 3/3] fix: test

---
 .../cm/container_manager_linux_test.go |  8 +-
 pkg/kubelet/cm/helpers_linux_test.go   | 94 +++++++++----------
 2 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/pkg/kubelet/cm/container_manager_linux_test.go b/pkg/kubelet/cm/container_manager_linux_test.go
index 01434ccb587..6c2dcbd0c98 100644
--- a/pkg/kubelet/cm/container_manager_linux_test.go
+++ b/pkg/kubelet/cm/container_manager_linux_test.go
@@ -131,16 +131,16 @@ func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
 }
 
 func TestGetCpuWeight(t *testing.T) {
-	assert.Equal(t, uint64(0), getCpuWeight(nil))
+	assert.Equal(t, uint64(0), getCPUWeight(nil))
 
 	v := uint64(2)
-	assert.Equal(t, uint64(1), getCpuWeight(&v))
+	assert.Equal(t, uint64(1), getCPUWeight(&v))
 
 	v = uint64(262144)
-	assert.Equal(t, uint64(10000), getCpuWeight(&v))
+	assert.Equal(t, uint64(10000), getCPUWeight(&v))
 
 	v = uint64(1000000000)
-	assert.Equal(t, uint64(10000), getCpuWeight(&v))
+	assert.Equal(t, uint64(10000), getCPUWeight(&v))
 }
 
 func TestSoftRequirementsValidationSuccess(t *testing.T) {
diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go
index 101b21e682a..318610b89bf 100644
--- a/pkg/kubelet/cm/helpers_linux_test.go
+++ b/pkg/kubelet/cm/helpers_linux_test.go
@@ -25,7 +25,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -87,7 +87,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &minShares},
+			expected:         &ResourceConfig{CPUShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -101,7 +101,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares},
+			expected:         &ResourceConfig{CPUShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -115,7 +115,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-with-limits-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -129,7 +129,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -146,7 +146,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
 		"burstable-with-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -160,7 +160,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -174,7 +174,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -191,7 +191,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
		"guaranteed": {
 			pod: &v1.Pod{
@@ -205,7 +205,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -219,7 +219,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -233,7 +233,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -247,7 +247,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"burstable-partial-limits-with-init-containers": {
 			pod: &v1.Pod{
@@ -272,7 +272,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
 	}
 
@@ -280,14 +280,14 @@ func TestResourceConfigForPod(t *testing.T) {
 
 		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
 
-		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
-			t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CpuPeriod, *actual.CpuPeriod)
+		if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
+			t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUPeriod, *actual.CPUPeriod)
 		}
-		if !reflect.DeepEqual(actual.CpuQuota, testCase.expected.CpuQuota) {
-			t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CpuQuota, *actual.CpuQuota)
+		if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
+			t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUQuota, *actual.CPUQuota)
 		}
-		if !reflect.DeepEqual(actual.CpuShares, testCase.expected.CpuShares) {
-			t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CpuShares, &actual.CpuShares)
+		if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
+			t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, &actual.CPUShares)
 		}
 		if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
 			t.Errorf("unexpected result, test: %v, memory not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.Memory, *actual.Memory)
@@ -332,7 +332,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &minShares},
+			expected:         &ResourceConfig{CPUShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -346,7 +346,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares},
+			expected:         &ResourceConfig{CPUShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -360,7 +360,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-with-limits-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -374,7 +374,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -391,7 +391,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
 		"burstable-with-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -405,7 +405,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &tunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &tunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -419,7 +419,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -436,7 +436,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -450,7 +450,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -464,7 +464,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -478,7 +478,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 		"guaranteed-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -492,7 +492,7 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
 		},
 	}
 
@@ -500,13 +500,13 @@ func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
 
 		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)
 
-		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
+		if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 		}
-		if !reflect.DeepEqual(actual.CpuQuota, testCase.expected.CpuQuota) {
+		if !reflect.DeepEqual(actual.CPUQuota, testCase.expected.CPUQuota) {
 			t.Errorf("unexpected result, test: %v, cpu quota not as expected", testName)
 		}
-		if !reflect.DeepEqual(actual.CpuShares, testCase.expected.CpuShares) {
+		if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
 			t.Errorf("unexpected result, test: %v, cpu shares not as expected", testName)
 		}
 		if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
@@ -683,7 +683,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &minShares},
+			expected:         &ResourceConfig{CPUShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -697,7 +697,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -711,7 +711,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"burstable-with-limits-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -725,7 +725,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -742,7 +742,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
 		},
 		"burstable-with-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -756,7 +756,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -770,7 +770,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &burstableMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"burstable-partial-limits-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -787,7 +787,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
+			expected:         &ResourceConfig{CPUShares: &burstablePartialShares, Unified: map[string]string{"memory.min": "209715200"}},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -801,7 +801,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"guaranteed-no-cpu-enforcement": {
 			pod: &v1.Pod{
@@ -815,7 +815,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      defaultQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"guaranteed-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -829,7 +829,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: true,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedTunedQuota, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 		"guaranteed-no-cpu-enforcement-with-tuned-quota": {
 			pod: &v1.Pod{
@@ -843,7 +843,7 @@ func TestResourceConfigForPodWithEnforceMemoryQoS(t *testing.T) {
 			},
 			enforceCPULimits: false,
 			quotaPeriod:      tunedQuotaPeriod,
-			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
+			expected:         &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &cpuNoLimit, CPUPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory, Unified: map[string]string{"memory.min": "104857600"}},
 		},
 	}
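
Taken together, the three patches apply the initialism rule from Go's code review guidance: words like API, CPU, and ID keep a uniform case instead of being camel-cased as Api, Cpu, and Id. A minimal sketch of the convention the diffs enforce follows; the ServerConfig type and its fields are hypothetical names chosen only to mirror the renames above, not code from the kubelet:

package main

import "fmt"

// ServerConfig illustrates the naming rule: initialisms stay fully
// upper-case in exported identifiers (API, CPU, ID); they become fully
// lower-case only when the whole identifier is unexported (api, cpu, id).
type ServerConfig struct {
	APIEndpoint string  // not ApiEndpoint
	CPUShares   *uint64 // not CpuShares
	DeviceID    string  // not DeviceId
}

func main() {
	shares := uint64(1024)
	cfg := ServerConfig{
		APIEndpoint: "https://example.invalid",
		CPUShares:   &shares,
		DeviceID:    "dev-0",
	}
	fmt.Printf("%s %d %s\n", cfg.APIEndpoint, *cfg.CPUShares, cfg.DeviceID)
}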