smtalign: propagate policy options to policies

Make the static policy consume the CPU manager policy options passed
down from the cpumanager instance.
Make the none policy validate that no options are given, and fail if
any are - this is almost surely a configuration mistake.

Add a new cpumanager.StaticPolicyOptions type to hold the options and
translate the user-supplied arguments into typed flags.

Co-authored-by: Swati Sehgal <swsehgal@redhat.com>
Signed-off-by: Francesco Romani <fromani@redhat.com>
Francesco Romani 2021-05-17 10:53:43 +02:00
parent 6dcec345df
commit 23abdab2b7
8 changed files with 440 additions and 79 deletions
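
To make the flow concrete, here is a minimal sketch of how the option map is
consumed, written as if it lived inside the cpumanager package (the example
function name is hypothetical, not part of this commit):

    package cpumanager

    import "fmt"

    func examplePolicyOptionsFlow() {
        // Options arrive as the raw map[string]string handed to NewManager.
        userOptions := map[string]string{
            FullPCPUsOnlyOption: "true", // i.e. "full-pcpus-only=true"
        }

        // The static policy translates the raw strings into typed options.
        opts, err := NewStaticPolicyOptions(userOptions)
        fmt.Println(opts.FullPhysicalCPUsOnly, err) // true <nil>

        // The none policy treats any option as a configuration mistake.
        _, err = NewNonePolicy(userOptions)
        fmt.Println(err != nil) // true
    }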

View File

@ -146,14 +146,17 @@ func (s *sourcesReadyStub) AllReady() bool { return true }
func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
var topo *topology.CPUTopology
var policy Policy
var err error
switch policyName(cpuPolicyName) {
case PolicyNone:
policy = NewNonePolicy()
policy, err = NewNonePolicy(cpuPolicyOptions)
if err != nil {
return nil, fmt.Errorf("new none policy error: %w", err)
}
case PolicyStatic:
var err error
topo, err = topology.Discover(machineInfo)
if err != nil {
return nil, err
@ -178,9 +181,9 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc
// exclusively allocated.
reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000
numReservedCPUs := int(math.Ceil(reservedCPUsFloat))
policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity)
policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions)
if err != nil {
return nil, fmt.Errorf("new static policy error: %v", err)
return nil, fmt.Errorf("new static policy error: %w", err)
}
default:

View File

@ -229,7 +229,8 @@ func TestCPUManagerAdd(t *testing.T) {
},
0,
cpuset.NewCPUSet(),
topologymanager.NewFakeManager())
topologymanager.NewFakeManager(),
nil)
testCases := []struct {
description string
updateErr error
@ -479,7 +480,7 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
}
for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
mockState := &mockState{
assignments: testCase.stAssignments,
@ -1004,7 +1005,8 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
},
1,
cpuset.NewCPUSet(0),
topologymanager.NewFakeManager())
topologymanager.NewFakeManager(),
nil)
testCases := []struct {
description string
updateErr error
@ -1061,3 +1063,69 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
}
}
}
func TestCPUManagerHandlePolicyOptions(t *testing.T) {
testCases := []struct {
description string
cpuPolicyName string
cpuPolicyOptions map[string]string
expectedError error
}{
{
description: "options to none policy",
cpuPolicyName: "none",
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
},
expectedError: fmt.Errorf("received unsupported options"),
},
}
// any correct realistic topology is fine. We pick a simple one.
mockedMachineInfo := cadvisorapi.MachineInfo{
NumCores: 4,
Topology: []cadvisorapi.Node{
{
Cores: []cadvisorapi.Core{
{
Id: 0,
Threads: []int{0},
},
{
Id: 1,
Threads: []int{1},
},
{
Id: 2,
Threads: []int{2},
},
{
Id: 3,
Threads: []int{3},
},
},
},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
machineInfo := &mockedMachineInfo
nodeAllocatableReservation := v1.ResourceList{}
sDir, err := ioutil.TempDir("/tmp/", "cpu_manager_test")
if err != nil {
t.Errorf("cannot create state file: %s", err.Error())
}
defer os.RemoveAll(sDir)
_, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.NewCPUSet(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
if err == nil {
t.Errorf("Expected error, but NewManager succeeded")
}
if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
}
})
}
}

View File

@ -38,7 +38,8 @@ func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
func (m *fakeManager) Policy() Policy {
klog.InfoS("Policy()")
return NewNonePolicy()
pol, _ := NewNonePolicy(nil)
return pol
}
func (m *fakeManager) Allocate(pod *v1.Pod, container *v1.Container) error {

View File

@ -17,6 +17,8 @@ limitations under the License.
package cpumanager
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
@ -32,8 +34,11 @@ var _ Policy = &nonePolicy{}
const PolicyNone policyName = "none"
// NewNonePolicy returns a cpuset manager policy that does nothing
func NewNonePolicy() Policy {
return &nonePolicy{}
func NewNonePolicy(cpuPolicyOptions map[string]string) (Policy, error) {
if len(cpuPolicyOptions) > 0 {
return nil, fmt.Errorf("None policy: received unsupported options=%v", cpuPolicyOptions)
}
return &nonePolicy{}, nil
}
func (p *nonePolicy) Name() string {
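
Note that the new constructor only rejects a populated map: both a nil map and an
explicitly empty map pass the len(cpuPolicyOptions) > 0 check. A small sketch of
the expected behaviour (the helper name is hypothetical and assumes the cpumanager
package scope, mirroring TestNonePolicyOptions below):

    func exampleNonePolicyValidation() error {
        if _, err := NewNonePolicy(nil); err != nil {
            return err // not expected: a nil map is accepted
        }
        if _, err := NewNonePolicy(map[string]string{}); err != nil {
            return err // not expected: an empty map carries no options
        }
        // Any populated map is rejected as a likely configuration mistake.
        _, err := NewNonePolicy(map[string]string{FullPCPUsOnlyOption: "true"})
        return err // "None policy: received unsupported options=map[full-pcpus-only:true]"
    }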

View File

@ -86,3 +86,20 @@ func TestNonePolicyGetAllocatableCPUs(t *testing.T) {
t.Errorf("NonePolicy GetAllocatableCPUs() error. expected empty set, returned: %v", cpus)
}
}
func TestNonePolicyOptions(t *testing.T) {
var err error
_, err = NewNonePolicy(nil)
if err != nil {
t.Errorf("NewNonePolicy with nil options failure. expected no error but got: %v", err)
}
opts := map[string]string{
FullPCPUsOnlyOption: "true",
}
_, err = NewNonePolicy(opts)
if err == nil {
t.Errorf("NewNonePolicy with (any) options failure. expected error but got none")
}
}

View File

@ -0,0 +1,56 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"strconv"
)
const (
// FullPCPUsOnlyOption is the name of the CPU Manager policy option
FullPCPUsOnlyOption string = "full-pcpus-only"
)
type StaticPolicyOptions struct {
// flag to enable extra allocation restrictions to avoid
// different containers possibly ending up on the same core.
// we consider "core" and "physical CPU" synonyms here, leaning
// towards the terminology the k8s docs hint at. We acknowledge this is confusing.
//
// looking at https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/,
// any possible naming scheme will lead to ambiguity to some extent.
// We picked "pcpu" because the established docs already hint at vCPU.
FullPhysicalCPUsOnly bool
}
func NewStaticPolicyOptions(policyOptions map[string]string) (StaticPolicyOptions, error) {
opts := StaticPolicyOptions{}
for name, value := range policyOptions {
switch name {
case FullPCPUsOnlyOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.FullPhysicalCPUsOnly = optValue
default:
return opts, fmt.Errorf("unsupported cpumanager option: %q (%s)", name, value)
}
}
return opts, nil
}
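
Since the value is parsed with strconv.ParseBool, only Go's boolean spellings are
accepted, and unknown keys are rejected rather than silently ignored. Expected
outcomes for a few inputs (the "some-future-option" key is made up for illustration):

    // Values fed to NewStaticPolicyOptions and the expected result:
    //   {"full-pcpus-only": "1"}        -> FullPhysicalCPUsOnly = true  ("1", "t", "true", ... are valid)
    //   {"full-pcpus-only": "false"}    -> FullPhysicalCPUsOnly = false
    //   {"full-pcpus-only": "yes"}      -> error: bad value for option "full-pcpus-only"
    //   {"some-future-option": "true"}  -> error: unsupported cpumanager option: "some-future-option" (true)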

View File

@ -29,8 +29,29 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
// PolicyStatic is the name of the static policy
const PolicyStatic policyName = "static"
const (
// PolicyStatic is the name of the static policy.
// Should options be given, these will be ignored and backward-compatible
// behaviour (up to and including 1.21) will be enforced
PolicyStatic policyName = "static"
// ErrorSMTAlignment represents the type of an SMTAlignmentError
ErrorSMTAlignment = "SMTAlignmentError"
)
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
CpusPerCore int
}
func (e SMTAlignmentError) Error() string {
return fmt.Sprintf("SMT Alignment Error: requested %d cpus not multiple cpus per core = %d", e.RequestedCPUs, e.CpusPerCore)
}
func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
@ -79,6 +100,8 @@ type staticPolicy struct {
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
cpusToReuse map[string]cpuset.CPUSet
// options allow fine-tuning the behaviour of the policy
options StaticPolicyOptions
}
// Ensure staticPolicy implements Policy interface
@ -87,7 +110,14 @@ var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store) (Policy, error) {
func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) {
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
}
klog.InfoS("Static policy created with configuration", "options", opts)
allCPUs := topology.CPUDetails.CPUs()
var reserved cpuset.CPUSet
if reservedCPUs.Size() > 0 {
@ -113,6 +143,7 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
reserved: reserved,
affinity: affinity,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
}, nil
}
@ -220,6 +251,21 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
// container belongs in an exclusively allocated pool
if p.options.FullPhysicalCPUsOnly && ((numCPUs % p.topology.CPUsPerCore()) != 0) {
// Since the CPU Manager has been enabled with strict SMT alignment requested, a guaranteed pod can only be admitted
// if its CPU request is a multiple of the number of virtual cpus per physical core.
// If the CPU request is not such a multiple, the Pod will be put in the Failed state, with SMTAlignmentError as the reason.
// Since the allocation happens in terms of physical cores and the scheduler is responsible for ensuring that the
// workload goes to a node that has enough CPUs, the pod would be placed on a node where there are enough physical
// cores available to be allocated.
// As with the existing static policy behaviour, takeByTopology will first try to allocate CPUs from the same socket,
// and only when the request cannot be satisfied on a single socket is the allocation spread further; either way the
// workload occupies all CPUs of each physical core it is given, so allocation of individual threads never has to occur.
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: p.topology.CPUsPerCore(),
}
}
if cpuset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
p.updateCPUsToReuse(pod, container, cpuset)
klog.InfoS("Static policy: container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name)

View File

@ -34,6 +34,7 @@ type staticPolicyTest struct {
topo *topology.CPUTopology
numReservedCPUs int
podUID string
options map[string]string
containerName string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
@ -43,8 +44,27 @@ type staticPolicyTest struct {
expCSet cpuset.CPUSet
}
// this is not a real Clone() (hence the Pseudo- prefix) because we don't clone
// some objects which are only accessed read-only
func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
return staticPolicyTest{
description: spt.description,
topo: spt.topo, // accessed in read-only
numReservedCPUs: spt.numReservedCPUs,
podUID: spt.podUID,
options: spt.options, // accessed in read-only
containerName: spt.containerName,
stAssignments: spt.stAssignments.Clone(),
stDefaultCPUSet: spt.stDefaultCPUSet.Clone(),
pod: spt.pod, // accessed in read-only
expErr: spt.expErr,
expCPUAlloc: spt.expCPUAlloc,
expCSet: spt.expCSet.Clone(),
}
}
func TestStaticPolicyName(t *testing.T) {
policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
policyName := policy.Name()
if policyName != "static" {
@ -120,7 +140,7 @@ func TestStaticPolicyStart(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
policy := p.(*staticPolicy)
st := &mockState{
assignments: testCase.stAssignments,
@ -168,7 +188,9 @@ func TestStaticPolicyAdd(t *testing.T) {
largeTopoSock0CPUSet := largeTopoSock0Builder.Result()
largeTopoSock1CPUSet := largeTopoSock1Builder.Result()
testCases := []staticPolicyTest{
// these are the cases which must behave the same regardless of the policy options.
// So we permute the options to ensure this holds true.
optionsInsensitiveTestCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
@ -180,17 +202,6 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
{
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT,
@ -400,22 +411,6 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: true,
expCSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47)),
},
{
// Only partial cores are available in the entire system.
// Expect allocation of all the CPUs from the partial cores.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocCPUs",
topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "5000m", "5000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
},
{
// Only 7 CPUs are available.
// Pod requests 76 cores.
@ -435,45 +430,130 @@ func TestStaticPolicyAdd(t *testing.T) {
},
}
for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
// testcases for the default behaviour of the policy.
defaultOptionsTestCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core
},
{
// Only partial cores are available in the entire system.
// Expect allocation of all the CPUs from the partial cores.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocCPUs",
topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "5000m", "5000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
},
}
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
// testcases for the FullPCPUsOnlyOption
smtalignOptionTestCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
options: map[string]string{
FullPCPUsOnlyOption: "true",
},
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: SMTAlignmentError{RequestedCPUs: 1, CpusPerCore: 2},
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), // reject allocation of sibling of partial core
},
{
// test SMT-level != 2 - which is the default on x86_64
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocOneCPUs",
topo: topoQuadSocketFourWayHT,
options: map[string]string{
FullPCPUsOnlyOption: "true",
},
numReservedCPUs: 8,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: largeTopoCPUSet,
pod: makePod("fakePod", "fakeContainer15", "15000m", "15000m"),
expErr: SMTAlignmentError{RequestedCPUs: 15, CpusPerCore: 4},
expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(),
},
}
for _, testCase := range optionsInsensitiveTestCases {
for _, options := range []map[string]string{
nil,
{
FullPCPUsOnlyOption: "true",
},
} {
tCase := testCase.PseudoClone()
tCase.description = fmt.Sprintf("options=%v %s", options, testCase.description)
tCase.options = options
runStaticPolicyTestCase(t, tCase)
}
}
for _, testCase := range defaultOptionsTestCases {
runStaticPolicyTestCase(t, testCase)
}
for _, testCase := range smtalignOptionTestCases {
runStaticPolicyTestCase(t, testCase)
}
}
func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), testCase.options)
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy Allocate() error (%v). expected add error: %q but got: %q",
testCase.description, testCase.expErr, err)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[string(testCase.pod.UID)][container.Name]
if !found {
t.Errorf("StaticPolicy Allocate() error (%v). expected container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
container := &testCase.pod.Spec.Containers[0]
err := policy.Allocate(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy Allocate() error (%v). expected add error: %v but got: %v",
testCase.description, testCase.expErr, err)
if !reflect.DeepEqual(cset, testCase.expCSet) {
t.Errorf("StaticPolicy Allocate() error (%v). expected cpuset %v but got %v",
testCase.description, testCase.expCSet, cset)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[string(testCase.pod.UID)][container.Name]
if !found {
t.Errorf("StaticPolicy Allocate() error (%v). expected container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
if !reflect.DeepEqual(cset, testCase.expCSet) {
t.Errorf("StaticPolicy Allocate() error (%v). expected cpuset %v but got %v",
testCase.description, testCase.expCSet, cset)
}
if !cset.Intersection(st.defaultCPUSet).IsEmpty() {
t.Errorf("StaticPolicy Allocate() error (%v). expected cpuset %v to be disoint from the shared cpuset %v",
testCase.description, cset, st.defaultCPUSet)
}
if !cset.Intersection(st.defaultCPUSet).IsEmpty() {
t.Errorf("StaticPolicy Allocate() error (%v). expected cpuset %v to be disoint from the shared cpuset %v",
testCase.description, cset, st.defaultCPUSet)
}
}
if !testCase.expCPUAlloc {
_, found := st.assignments[string(testCase.pod.UID)][container.Name]
if found {
t.Errorf("StaticPolicy Allocate() error (%v). Did not expect container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
if !testCase.expCPUAlloc {
_, found := st.assignments[string(testCase.pod.UID)][container.Name]
if found {
t.Errorf("StaticPolicy Allocate() error (%v). Did not expect container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
}
}
@ -537,7 +617,7 @@ func TestStaticPolicyRemove(t *testing.T) {
}
for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
st := &mockState{
assignments: testCase.stAssignments,
@ -627,7 +707,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
},
}
for _, tc := range testCases {
p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
policy := p.(*staticPolicy)
st := &mockState{
assignments: tc.stAssignments,
@ -701,7 +781,7 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager())
p, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
if !reflect.DeepEqual(err, testCase.expNewErr) {
t.Errorf("StaticPolicy Start() error (%v). expected error: %v but got: %v",
testCase.description, testCase.expNewErr, err)
@ -778,7 +858,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
}
for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager())
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), nil)
st := &mockState{
assignments: testCase.stAssignments,
@ -819,3 +899,88 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
}
}
}
type staticPolicyOptionTestCase struct {
description string
policyOptions map[string]string
expectedError bool
expectedValue StaticPolicyOptions
}
func TestStaticPolicyOptions(t *testing.T) {
testCases := []staticPolicyOptionTestCase{
{
description: "nil args",
policyOptions: nil,
expectedError: false,
expectedValue: StaticPolicyOptions{},
},
{
description: "empty args",
policyOptions: map[string]string{},
expectedError: false,
expectedValue: StaticPolicyOptions{},
},
{
description: "bad single arg",
policyOptions: map[string]string{
"badValue1": "",
},
expectedError: true,
},
{
description: "bad multiple arg",
policyOptions: map[string]string{
"badValue1": "",
"badvalue2": "aaaa",
},
expectedError: true,
},
{
description: "good arg",
policyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
},
expectedError: false,
expectedValue: StaticPolicyOptions{
FullPhysicalCPUsOnly: true,
},
},
{
description: "good arg, bad value",
policyOptions: map[string]string{
FullPCPUsOnlyOption: "enabled!",
},
expectedError: true,
},
{
description: "bad arg intermixed",
policyOptions: map[string]string{
FullPCPUsOnlyOption: "1",
"badvalue2": "lorem ipsum",
},
expectedError: true,
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
opts, err := NewStaticPolicyOptions(testCase.policyOptions)
gotError := (err != nil)
if gotError != testCase.expectedError {
t.Fatalf("error with args %v expected error %v got %v: %v",
testCase.policyOptions, testCase.expectedError, gotError, err)
}
if testCase.expectedError {
return
}
if !reflect.DeepEqual(opts, testCase.expectedValue) {
t.Fatalf("value mismatch with args %v expected value %v got %v",
testCase.policyOptions, testCase.expectedValue, opts)
}
})
}
}