Move containerMap out of static policy and into top-level CPUManager

Kevin Klues 2019-10-22 17:53:26 +02:00
parent 1d995c98ef
commit 765aae93f8
5 changed files with 292 additions and 268 deletions
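
The manager-level helpers introduced below lean on the existing containermap package (pkg/kubelet/cm/cpumanager/containermap), using exactly three of its methods: Add, GetContainerID, and RemoveByContainerID. As a reference for reading the diff, here is a minimal, self-contained sketch of that interface; the field layout is an assumption for illustration and is not taken from the real package.

// Minimal sketch of the containermap interface this commit relies on.
// Only the method set (Add, GetContainerID, RemoveByContainerID) mirrors
// the calls made in the diff; the internal layout here is illustrative.
package containermap

import "fmt"

// ContainerMap associates a containerID with its (podUID, containerName).
type ContainerMap map[string]struct {
	podUID        string
	containerName string
}

// NewContainerMap returns an empty ContainerMap.
func NewContainerMap() ContainerMap {
	return make(ContainerMap)
}

// Add records that containerID belongs to (podUID, containerName).
func (cm ContainerMap) Add(podUID, containerName, containerID string) {
	cm[containerID] = struct {
		podUID        string
		containerName string
	}{podUID, containerName}
}

// RemoveByContainerID drops the entry for containerID, if present.
func (cm ContainerMap) RemoveByContainerID(containerID string) {
	delete(cm, containerID)
}

// GetContainerID returns the containerID recorded for (podUID, containerName).
func (cm ContainerMap) GetContainerID(podUID, containerName string) (string, error) {
	for id, ref := range cm {
		if ref.podUID == podUID && ref.containerName == containerName {
			return id, nil
		}
	}
	return "", fmt.Errorf("container %s/%s not found in ContainerMap", podUID, containerName)
}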

View File

@@ -43,6 +43,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/cm/cpumanager/containermap:go_default_library",
"//pkg/kubelet/cm/cpumanager/state:go_default_library",
"//pkg/kubelet/cm/cpumanager/topology:go_default_library",
"//pkg/kubelet/cm/cpuset:go_default_library",

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/klog"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -96,6 +97,10 @@ type manager struct {
// and the containerID of their containers
podStatusProvider status.PodStatusProvider
// containerMap provides a mapping from (pod, container) -> containerID
// for all containers in a pod
containerMap containermap.ContainerMap
topology *topology.CPUTopology
nodeAllocatableReservation v1.ResourceList
@@ -162,6 +167,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
policy: policy,
reconcilePeriod: reconcilePeriod,
state: stateImpl,
containerMap: containermap.NewContainerMap(),
topology: topo,
nodeAllocatableReservation: nodeAllocatableReservation,
}
@@ -186,7 +192,18 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) error {
m.Lock()
err := m.policy.AddContainer(m.state, p, c, containerID)
// Proactively remove CPUs from init containers that have already run.
// They are guaranteed to have run to completion before any other
// container is run.
for _, initContainer := range p.Spec.InitContainers {
if c.Name != initContainer.Name {
err := m.policyRemoveContainerByRef(string(p.UID), initContainer.Name)
if err != nil {
klog.Warningf("[cpumanager] unable to remove init container (pod: %s, container: %s, error: %v)", string(p.UID), initContainer.Name, err)
}
}
}
err := m.policyAddContainer(p, c, containerID)
if err != nil {
klog.Errorf("[cpumanager] AddContainer error: %v", err)
m.Unlock()
@@ -200,7 +217,7 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e
if err != nil {
klog.Errorf("[cpumanager] AddContainer error: %v", err)
m.Lock()
err := m.policy.RemoveContainer(m.state, containerID)
err := m.policyRemoveContainerByID(containerID)
if err != nil {
klog.Errorf("[cpumanager] AddContainer rollback state error: %v", err)
}
@@ -216,7 +233,7 @@ func (m *manager) RemoveContainer(containerID string) error {
m.Lock()
defer m.Unlock()
err := m.policy.RemoveContainer(m.state, containerID)
err := m.policyRemoveContainerByID(containerID)
if err != nil {
klog.Errorf("[cpumanager] RemoveContainer error: %v", err)
return err
@@ -224,6 +241,37 @@ func (m *manager) RemoveContainer(containerID string) error {
return nil
}
func (m *manager) policyAddContainer(p *v1.Pod, c *v1.Container, containerID string) error {
err := m.policy.AddContainer(m.state, p, c, containerID)
if err == nil {
m.containerMap.Add(string(p.UID), c.Name, containerID)
}
return err
}
func (m *manager) policyRemoveContainerByID(containerID string) error {
err := m.policy.RemoveContainer(m.state, containerID)
if err == nil {
m.containerMap.RemoveByContainerID(containerID)
}
return err
}
func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) error {
containerID, err := m.containerMap.GetContainerID(podUID, containerName)
if err != nil {
return nil
}
err = m.policy.RemoveContainer(m.state, containerID)
if err == nil {
m.containerMap.RemoveByContainerID(containerID)
}
return err
}
func (m *manager) State() state.Reader {
return m.state
}
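
To make the new AddContainer flow concrete, here is a hypothetical call sequence (the pod variable and container IDs below are invented for illustration, but the method signature matches the diff above): the first call assigns exclusive CPUs to the init container and records it in containerMap; the second call first reclaims those CPUs via policyRemoveContainerByRef, then assigns CPUs to the app container.

// Hypothetical driver, assuming a Guaranteed pod with one init container
// and one app container; container IDs are invented for illustration.
mgr.AddContainer(pod, &pod.Spec.InitContainers[0], "init-id") // init container gets exclusive CPUs
mgr.AddContainer(pod, &pod.Spec.Containers[0], "app-id")      // init CPUs released first, then app CPUs assigned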

View File

@@ -32,6 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -246,6 +247,7 @@ func TestCPUManagerAdd(t *testing.T) {
containerRuntime: mockRuntimeService{
err: testCase.updateErr,
},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
@@ -264,6 +266,237 @@ func TestCPUManagerAdd(t *testing.T) {
}
}
func TestCPUManagerAddWithInitContainers(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
initContainerIDs []string
containerIDs []string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
expInitCSets []cpuset.CPUSet
expCSets []cpuset.CPUSet
}{
{
description: "No Guaranteed Init CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"100m", "100m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet()},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Equal Number of Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"4000m", "4000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "More Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"6000m", "6000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Less Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"2000m", "2000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Multi Init Container Equal CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
},
{
description: "Multi Init Container Less CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"4000m", "4000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
},
{
description: "Multi Init Container More CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}},
[]struct{ request, limit string }{
{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Multi Init Container Increasing CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"6000m", "6000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
},
{
description: "Multi Init, Multi App Container Split CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID-1", "appFakeID-2"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(1, 5)},
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
state := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
mgr := &manager{
policy: policy,
state: state,
containerRuntime: mockRuntimeService{},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
containers := append(
testCase.pod.Spec.InitContainers,
testCase.pod.Spec.Containers...)
containerIDs := append(
testCase.initContainerIDs,
testCase.containerIDs...)
expCSets := append(
testCase.expInitCSets,
testCase.expCSets...)
for i := range containers {
err := mgr.AddContainer(testCase.pod, &containers[i], containerIDs[i])
if err != nil {
t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v",
testCase.description, containerIDs[i], err)
}
cset, found := state.assignments[containerIDs[i]]
if !expCSets[i].IsEmpty() && !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, containerIDs[i], state.assignments)
}
if found && !cset.Equals(expCSets[i]) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v",
testCase.description, expCSets[i], containerIDs[i], cset)
}
}
}
}
func TestCPUManagerGenerate(t *testing.T) {
testCases := []struct {
description string
@@ -386,6 +619,7 @@ func TestCPUManagerRemove(t *testing.T) {
defaultCPUSet: cpuset.NewCPUSet(),
},
containerRuntime: mockRuntimeService{},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
@@ -401,6 +635,7 @@ func TestCPUManagerRemove(t *testing.T) {
},
state: state.NewMemoryState(),
containerRuntime: mockRuntimeService{},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
@@ -630,6 +865,7 @@ func TestReconcileState(t *testing.T) {
containerRuntime: mockRuntimeService{
err: testCase.updateErr,
},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod {
return testCase.activePods
},
@@ -724,6 +960,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
containerRuntime: mockRuntimeService{
err: testCase.updateErr,
},
containerMap: containermap.NewContainerMap(),
activePods: func() []*v1.Pod { return nil },
podStatusProvider: mockPodStatusProvider{},
}
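
The TestCPUManagerAddWithInitContainers cases above build their pods with the pre-existing makeMultiContainerPod helper, which is defined elsewhere in the cpumanager test code and is not part of this diff. Roughly, it turns each (request, limit) pair into a container whose CPU request equals its limit, so every container lands in the Guaranteed QoS class and qualifies for exclusive CPUs. The sketch below is a hypothetical reconstruction under that assumption (container names, memory values, and pod metadata are invented); it assumes the fmt, core/v1, resource, and metav1 imports already present in the test file.

// Hypothetical reconstruction of makeMultiContainerPod (illustration only).
func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string }) *v1.Pod {
	makeContainers := func(prefix string, specs []struct{ request, limit string }) []v1.Container {
		var containers []v1.Container
		for i, spec := range specs {
			containers = append(containers, v1.Container{
				Name: fmt.Sprintf("%s-%d", prefix, i),
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse(spec.request),
						v1.ResourceMemory: resource.MustParse("1G"),
					},
					Limits: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse(spec.limit),
						v1.ResourceMemory: resource.MustParse("1G"),
					},
				},
			})
		}
		return containers
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "test-pod", UID: "fakePodUID"},
		Spec: v1.PodSpec{
			InitContainers: makeContainers("init", initCPUs),
			Containers:     makeContainers("app", appCPUs),
		},
	}
}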

View File

@@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
@@ -76,10 +75,6 @@ type staticPolicy struct {
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
// containerMap provides a mapping from
// (pod, container) -> containerID
// for all containers a pod
containerMap containermap.ContainerMap
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
}
@@ -111,10 +106,9 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
return &staticPolicy{
topology: topology,
reserved: reserved,
containerMap: containermap.NewContainerMap(),
affinity: affinity,
topology: topology,
reserved: reserved,
affinity: affinity,
}
}
@@ -189,14 +183,6 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
}
func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) (rerr error) {
// So long as this function does not return an error,
// add (pod, container, containerID) to the containerMap.
defer func() {
if rerr == nil {
p.containerMap.Add(string(pod.UID), container.Name, containerID)
}
}()
if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID)
// container belongs in an exclusively allocated pool
@@ -206,22 +192,6 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co
return nil
}
// Proactively remove CPUs from init containers that have already run.
// They are guaranteed to have run to completion before any other
// container is run.
for _, initContainer := range pod.Spec.InitContainers {
if container.Name != initContainer.Name {
initContainerID, err := p.containerMap.GetContainerID(string(pod.UID), initContainer.Name)
if err != nil {
continue
}
err = p.RemoveContainer(s, initContainerID)
if err != nil {
klog.Warningf("[cpumanager] unable to remove init container (container id: %s, error: %v)", initContainerID, err)
}
}
}
// Call Topology Manager to get the aligned socket affinity across all hint providers.
hint := p.affinity.GetAffinity(string(pod.UID), container.Name)
klog.Infof("[cpumanager] Pod %v, Container %v Topology Affinity is: %v", pod.UID, container.Name, hint)
@@ -239,14 +209,6 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co
}
func (p *staticPolicy) RemoveContainer(s state.State, containerID string) (rerr error) {
// So long as this function does not return an error,
// remove containerID from the containerMap.
defer func() {
if rerr == nil {
p.containerMap.RemoveByContainerID(containerID)
}
}()
klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID)
if toRelease, ok := s.GetCPUSet(containerID); ok {
s.Delete(containerID)

View File

@@ -43,19 +43,6 @@ type staticPolicyTest struct {
expPanic bool
}
type staticPolicyMultiContainerTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
initContainerIDs []string
containerIDs []string
stAssignments state.ContainerCPUAssignments
stDefaultCPUSet cpuset.CPUSet
pod *v1.Pod
expInitCSets []cpuset.CPUSet
expCSets []cpuset.CPUSet
}
func TestStaticPolicyName(t *testing.T) {
policy := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
@@ -474,217 +461,6 @@ func TestStaticPolicyAdd(t *testing.T) {
}
}
func TestStaticPolicyAddWithInitContainers(t *testing.T) {
testCases := []staticPolicyMultiContainerTest{
{
description: "No Guaranteed Init CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"100m", "100m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet()},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Equal Number of Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"4000m", "4000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "More Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"6000m", "6000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Less Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"2000m", "2000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Multi Init Container Equal CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
},
{
description: "Multi Init Container Less CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"4000m", "4000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4)},
},
{
description: "Multi Init Container More CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}},
[]struct{ request, limit string }{
{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5)},
},
{
description: "Multi Init Container Increasing CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"6000m", "6000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
},
{
description: "Multi Init, Multi App Container Split CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID-1", "appFakeID-2"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"4000m", "4000m"}},
[]struct{ request, limit string }{
{"2000m", "2000m"},
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4),
cpuset.NewCPUSet(1, 5)},
},
}
for _, testCase := range testCases {
policy := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager())
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet,
}
containers := append(
testCase.pod.Spec.InitContainers,
testCase.pod.Spec.Containers...)
containerIDs := append(
testCase.initContainerIDs,
testCase.containerIDs...)
expCSets := append(
testCase.expInitCSets,
testCase.expCSets...)
for i := range containers {
err := policy.AddContainer(st, testCase.pod, &containers[i], containerIDs[i])
if err != nil {
t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v",
testCase.description, containerIDs[i], err)
}
cset, found := st.assignments[containerIDs[i]]
if !expCSets[i].IsEmpty() && !found {
t.Errorf("StaticPolicy AddContainer() error (%v). expected container id %v to be present in assignments %v",
testCase.description, containerIDs[i], st.assignments)
}
if found && !cset.Equals(expCSets[i]) {
t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v",
testCase.description, expCSets[i], containerIDs[i], cset)
}
}
}
}
func TestStaticPolicyRemove(t *testing.T) {
testCases := []staticPolicyTest{
{