Update CPUManager policies to adhere to new state semantics

Author: Kevin Klues
Date: 2019-11-05 20:24:36 +00:00
Commit: 185e790f71 (parent: 7c760fea38)
5 changed files with 142 additions and 107 deletions

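The policies below are rewritten against CPU manager state that keys CPU assignments by pod UID and container name instead of by container ID. The diff only shows the call sites, so the following is a minimal sketch of the state shape those calls imply; the interface subset and nested-map layout are inferred from this commit rather than copied verbatim from the state package.

```go
package state

import "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

// ContainerCPUAssignments maps pod UID -> container name -> assigned CPUs
// (previously it mapped container ID -> assigned CPUs).
type ContainerCPUAssignments map[string]map[string]cpuset.CPUSet

// State is only the subset of the CPU manager state interface that the
// policies in this commit call; other methods are omitted.
type State interface {
	GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool)
	SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet)
	Delete(podUID string, containerName string)
	GetDefaultCPUSet() cpuset.CPUSet
	SetDefaultCPUSet(cset cpuset.CPUSet)
}
```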
View File

@@ -27,9 +27,9 @@ type Policy interface {
Name() string
Start(s state.State)
// AddContainer call is idempotent
AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error
AddContainer(s state.State, pod *v1.Pod, container *v1.Container) error
// RemoveContainer call is idempotent
RemoveContainer(s state.State, containerID string) error
RemoveContainer(s state.State, podUID string, containerName string) error
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.

View File

@@ -43,11 +43,11 @@ func (p *nonePolicy) Start(s state.State) {
klog.Info("[cpumanager] none policy: Start")
}
func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error {
func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container) error {
return nil
}
func (p *nonePolicy) RemoveContainer(s state.State, containerID string) error {
func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
return nil
}

View File

@@ -44,7 +44,7 @@ func TestNonePolicyAdd(t *testing.T) {
testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m")
container := &testPod.Spec.Containers[0]
err := policy.AddContainer(st, testPod, container, "fakeID")
err := policy.AddContainer(st, testPod, container)
if err != nil {
t.Errorf("NonePolicy AddContainer() error. expected no error but got: %v", err)
}
@@ -58,7 +58,10 @@ func TestNonePolicyRemove(t *testing.T) {
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
}
err := policy.RemoveContainer(st, "fakeID")
testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m")
container := &testPod.Spec.Containers[0]
err := policy.RemoveContainer(st, string(testPod.UID), container.Name)
if err != nil {
t.Errorf("NonePolicy RemoveContainer() error. expected no error but got %v", err)
}

View File

@@ -148,11 +148,13 @@ func (p *staticPolicy) validateState(s state.State) error {
}
// 2. Check if state for static policy is consistent
for cID, cset := range tmpAssignments {
// None of the cpu in DEFAULT cset should be in s.assignments
if !tmpDefaultCPUset.Intersection(cset).IsEmpty() {
return fmt.Errorf("container id: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"",
cID, cset.String(), tmpDefaultCPUset.String())
for pod := range tmpAssignments {
for container, cset := range tmpAssignments[pod] {
// None of the cpu in DEFAULT cset should be in s.assignments
if !tmpDefaultCPUset.Intersection(cset).IsEmpty() {
return fmt.Errorf("pod: %s, container: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"",
pod, container, cset.String(), tmpDefaultCPUset.String())
}
}
}
@@ -165,8 +167,10 @@ func (p *staticPolicy) validateState(s state.State) error {
// the set of CPUs stored in the state.
totalKnownCPUs := tmpDefaultCPUset.Clone()
tmpCPUSets := []cpuset.CPUSet{}
for _, cset := range tmpAssignments {
tmpCPUSets = append(tmpCPUSets, cset)
for pod := range tmpAssignments {
for _, cset := range tmpAssignments[pod] {
tmpCPUSets = append(tmpCPUSets, cset)
}
}
totalKnownCPUs = totalKnownCPUs.UnionAll(tmpCPUSets)
if !totalKnownCPUs.Equals(p.topology.CPUDetails.CPUs()) {
@@ -182,13 +186,13 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reserved)
}
func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) (rerr error) {
func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container) error {
if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s)", pod.Name, container.Name)
// container belongs in an exclusively allocated pool
if _, ok := s.GetCPUSet(containerID); ok {
klog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID)
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
klog.Infof("[cpumanager] static policy: container already present in state, skipping (pod: %s, container: %s)", pod.Name, container.Name)
return nil
}
@@ -199,19 +203,19 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co
// Allocate CPUs according to the NUMA affinity contained in the hint.
cpuset, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity)
if err != nil {
klog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err)
klog.Errorf("[cpumanager] unable to allocate %d CPUs (pod: %s, container: %s, error: %v)", numCPUs, pod.Name, container.Name, err)
return err
}
s.SetCPUSet(containerID, cpuset)
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
}
// container belongs in the shared pool (nothing to do; use default cpuset)
return nil
}
func (p *staticPolicy) RemoveContainer(s state.State, containerID string) (rerr error) {
klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
if toRelease, ok := s.GetCPUSet(containerID); ok {
s.Delete(containerID)
func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
klog.Infof("[cpumanager] static policy: RemoveContainer (pod: %s, container: %s)", podUID, containerName)
if toRelease, ok := s.GetCPUSet(podUID, containerName); ok {
s.Delete(podUID, containerName)
// Mutate the shared pool, adding released cpus.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
}
@@ -291,8 +295,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.
// Short circuit to regenerate the same hints if there are already
// guaranteed CPUs allocated to the Container. This might happen after a
// kubelet restart, for example.
containerID, _ := findContainerIDByName(&pod.Status, container.Name)
if allocated, exists := s.GetCPUSet(containerID); exists {
if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists {
if allocated.Size() != requested {
klog.Errorf("[cpumanager] CPUs already allocated to (pod %v, container %v) with different number than request: requested: %d, allocated: %d", string(pod.UID), container.Name, requested, allocated.Size())
return map[string][]topologymanager.TopologyHint{

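validateState above now walks two map levels instead of one. The helper below is purely illustrative (it is not part of this commit) and just makes the traversal pattern explicit; the package name and import paths are assumed from the files being changed.

```go
package cpumanager

import (
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

// flattenAssignments collects every per-container CPUSet out of the nested
// pod UID -> container name -> CPUSet map, mirroring the double loop added
// to validateState in this commit.
func flattenAssignments(assignments state.ContainerCPUAssignments) []cpuset.CPUSet {
	sets := []cpuset.CPUSet{}
	for pod := range assignments {
		for _, cset := range assignments[pod] {
			sets = append(sets, cset)
		}
	}
	return sets
}
```

With such a helper, the accounting check reduces to something like tmpDefaultCPUset.Clone().UnionAll(flattenAssignments(tmpAssignments)), which is what the second validateState hunk does inline.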
View File

@@ -33,7 +33,8 @@ type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
containerID string
podUID string
containerName string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -59,7 +60,9 @@ func TestStaticPolicyStart(t *testing.T) {
description: "non-corrupted state",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0),
"fakePod": map[string]cpuset.CPUSet{
"0": cpuset.NewCPUSet(0),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
@@ -84,7 +87,9 @@ func TestStaticPolicyStart(t *testing.T) {
description: "assigned core 2 is still present in available cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
"fakePod": map[string]cpuset.CPUSet{
"0": cpuset.NewCPUSet(0, 1, 2),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expPanic: true,
@@ -93,8 +98,10 @@ func TestStaticPolicyStart(t *testing.T) {
description: "core 12 is not present in topology but is in state cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
"fakePod": map[string]cpuset.CPUSet{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10, 11, 12),
expPanic: true,
@@ -103,8 +110,10 @@ func TestStaticPolicyStart(t *testing.T) {
description: "core 11 is present in topology but is not in state cpuset",
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
"fakePod": map[string]cpuset.CPUSet{
"0": cpuset.NewCPUSet(0, 1, 2),
"1": cpuset.NewCPUSet(3, 4),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10),
expPanic: true,
@@ -166,7 +175,6 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -178,7 +186,6 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -190,9 +197,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2, 3, 6, 7),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
@@ -204,9 +212,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, SingleSocketHT, ExpectSameAllocation",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID3": cpuset.NewCPUSet(2, 3, 6, 7),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer3": cpuset.NewCPUSet(2, 3, 6, 7),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "4000m", "4000m"),
@@ -218,9 +227,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"),
@@ -232,9 +242,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocThreeCores",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 5),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 5),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 2, 3, 4, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"),
@@ -246,9 +257,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocOneSocket",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"),
@@ -260,9 +272,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocFourCores",
topo: topoDualSocketNoHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(4, 5),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(4, 5),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"),
@@ -274,9 +287,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocketOneCore",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "8000m", "8000m"),
@@ -288,7 +302,6 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "NonGuPod, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID1",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "1000m", "2000m"),
@@ -300,7 +313,6 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodNonIntegerCore, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID4",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer4", "977m", "977m"),
@@ -312,9 +324,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 2, 3, 4, 5, 6),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 2, 3, 4, 5, 6),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 7),
pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"),
@@ -326,9 +339,10 @@ func TestStaticPolicyAdd(t *testing.T) {
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
topo: topoDualSocketHT,
numReservedCPUs: 1,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(1, 2, 3),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 2, 3),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"),
@@ -342,9 +356,10 @@ func TestStaticPolicyAdd(t *testing.T) {
// Expect all CPUs from Socket 0.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocSock0",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(3, 11, 4, 5, 6, 7),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(3, 11, 4, 5, 6, 7),
},
},
stDefaultCPUSet: largeTopoCPUSet.Difference(cpuset.NewCPUSet(3, 11, 4, 5, 6, 7)),
pod: makePod("fakePod", "fakeContainer5", "72000m", "72000m"),
@@ -357,10 +372,11 @@ func TestStaticPolicyAdd(t *testing.T) {
// Expect CPUs from the 2 full cores available from the three Sockets.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocAllFullCoresFromThreeSockets",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51,
53, 173, 113, 233, 54, 61)),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51,
53, 173, 113, 233, 54, 61)),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51, 53, 173, 113, 233, 54, 61),
pod: makePod("fakePod", "fakeCcontainer5", "12000m", "12000m"),
@@ -373,10 +389,11 @@ func TestStaticPolicyAdd(t *testing.T) {
// Expect all CPUs from Socket 1 and the hyper-threads from the full core.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocAllSock1+FullCore",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53,
173, 61, 181, 108, 228, 115, 235))),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53,
173, 61, 181, 108, 228, 115, 235))),
},
},
stDefaultCPUSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53, 173, 61, 181, 108, 228,
115, 235)),
@@ -390,9 +407,10 @@ func TestStaticPolicyAdd(t *testing.T) {
// Expect allocation of all the CPUs from the partial cores.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, ExpectAllocCPUs",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "5000m", "5000m"),
@@ -406,9 +424,10 @@ func TestStaticPolicyAdd(t *testing.T) {
// Error is expect since available CPUs are less than the request.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, NoAlloc",
topo: topoQuadSocketFourWayHT,
containerID: "fakeID5",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"),
@@ -427,17 +446,17 @@ func TestStaticPolicyAdd(t *testing.T) {
}
container := &testCase.pod.Spec.Containers[0]
err := policy.AddContainer(st, testCase.pod, container, testCase.containerID)
err := policy.AddContainer(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected add error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[testCase.containerID]
cset, found := st.assignments[string(testCase.pod.UID)][container.Name]
if !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
t.Errorf("StaticPolicy AddContainer() error (%v). expected container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
if !reflect.DeepEqual(cset, testCase.expCSet) {
@@ -452,10 +471,10 @@ func TestStaticPolicyAdd(t *testing.T) {
}
if !testCase.expCPUAlloc {
_, found := st.assignments[testCase.containerID]
_, found := st.assignments[string(testCase.pod.UID)][container.Name]
if found {
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
}
}
@@ -464,43 +483,55 @@ func TestStaticPolicyAdd(t *testing.T) {
func TestStaticPolicyRemove(t *testing.T) {
testCases := []staticPolicyTest{
{
description: "SingleSocketHT, DeAllocOneContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
description: "SingleSocketHT, DeAllocOneContainer",
topo: topoSingleSocketHT,
podUID: "fakePod",
containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 2, 3),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(4, 5, 6, 7),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
},
{
description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty",
topo: topoSingleSocketHT,
containerID: "fakeID1",
description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty",
topo: topoSingleSocketHT,
podUID: "fakePod",
containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 2, 3),
"fakeID2": cpuset.NewCPUSet(4, 5, 6, 7),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 2, 3),
"fakeContainer2": cpuset.NewCPUSet(4, 5, 6, 7),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(),
expCSet: cpuset.NewCPUSet(1, 2, 3),
},
{
description: "SingleSocketHT, DeAllocTwoContainer",
topo: topoSingleSocketHT,
containerID: "fakeID1",
description: "SingleSocketHT, DeAllocTwoContainer",
topo: topoSingleSocketHT,
podUID: "fakePod",
containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
"fakeID2": cpuset.NewCPUSet(2, 4),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 3, 5),
"fakeContainer2": cpuset.NewCPUSet(2, 4),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(6, 7),
expCSet: cpuset.NewCPUSet(1, 3, 5, 6, 7),
},
{
description: "SingleSocketHT, NoDeAlloc",
topo: topoSingleSocketHT,
containerID: "fakeID2",
description: "SingleSocketHT, NoDeAlloc",
topo: topoSingleSocketHT,
podUID: "fakePod",
containerName: "fakeContainer2",
stAssignments: state.ContainerCPUAssignments{
"fakeID1": cpuset.NewCPUSet(1, 3, 5),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 3, 5),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(2, 4, 6, 7),
expCSet: cpuset.NewCPUSet(2, 4, 6, 7),
@@ -515,16 +546,16 @@ func TestStaticPolicyRemove(t *testing.T) {
defaultCPUSet: testCase.stDefaultCPUSet,
}
policy.RemoveContainer(st, testCase.containerID)
policy.RemoveContainer(st, testCase.podUID, testCase.containerName)
if !reflect.DeepEqual(st.defaultCPUSet, testCase.expCSet) {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected default cpuset %v but got %v",
testCase.description, testCase.expCSet, st.defaultCPUSet)
}
if _, found := st.assignments[testCase.containerID]; found {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected containerID %v not be in assignments %v",
testCase.description, testCase.containerID, st.assignments)
if _, found := st.assignments[testCase.podUID][testCase.containerName]; found {
t.Errorf("StaticPolicy RemoveContainer() error (%v). expected (pod %v, container %v) not be in assignments %v",
testCase.description, testCase.podUID, testCase.containerName, st.assignments)
}
}
}
@@ -626,7 +657,6 @@ type staticPolicyTestWithResvList struct {
topo *topology.CPUTopology
numReservedCPUs int
reserved cpuset.CPUSet
containerID string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
@@ -701,7 +731,6 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0),
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
@@ -714,7 +743,6 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
containerID: "fakeID2",
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
@@ -727,9 +755,10 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1),
containerID: "fakeID3",
stAssignments: state.ContainerCPUAssignments{
"fakeID100": cpuset.NewCPUSet(2, 3, 6, 7),
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7),
},
},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
@@ -748,17 +777,17 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
}
container := &testCase.pod.Spec.Containers[0]
err := policy.AddContainer(st, testCase.pod, container, testCase.containerID)
err := policy.AddContainer(st, testCase.pod, container)
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected add error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
if testCase.expCPUAlloc {
cset, found := st.assignments[testCase.containerID]
cset, found := st.assignments[string(testCase.pod.UID)][container.Name]
if !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
t.Errorf("StaticPolicy AddContainer() error (%v). expected container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
if !reflect.DeepEqual(cset, testCase.expCSet) {
@@ -773,10 +802,10 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
}
if !testCase.expCPUAlloc {
_, found := st.assignments[testCase.containerID]
_, found := st.assignments[string(testCase.pod.UID)][container.Name]
if found {
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container id %v to be present in assignments %v",
testCase.description, testCase.containerID, st.assignments)
t.Errorf("StaticPolicy AddContainer() error (%v). Did not expect container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
}
}
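
A note on the rewritten test assertions such as st.assignments[testCase.podUID][testCase.containerName]: in Go, indexing a missing outer key yields a nil inner map, and indexing a nil map with the comma-ok form returns the zero value and false rather than panicking, so these lookups need no preliminary existence check. A standalone illustration, independent of the kubelet packages:

```go
package main

import "fmt"

func main() {
	assignments := map[string]map[string]string{
		"fakePod": {"fakeContainer1": "1-3"},
	}

	// Outer key missing: assignments["otherPod"] is a nil map, and indexing
	// a nil map with comma-ok returns ("", false) instead of panicking.
	_, found := assignments["otherPod"]["fakeContainer1"]
	fmt.Println(found) // false

	// Both keys present: the lookup succeeds as usual.
	_, found = assignments["fakePod"]["fakeContainer1"]
	fmt.Println(found) // true
}
```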