cpuset: Rename 'NewCPUSet' to 'New'

Ian K. Coolidge
2022-12-19 17:29:49 +00:00
parent 768b1ecfb6
commit f3829c4be3
19 changed files with 453 additions and 453 deletions
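The change is purely mechanical: every call site that built a CPUSet with cpuset.NewCPUSet(...) now calls cpuset.New(...) with the same variadic CPU IDs; this commit only renames the constructor. A minimal sketch of the renamed constructor in use (the import path and the String/Size calls below are illustrative assumptions, not part of this diff):

package main

import (
	"fmt"

	// Assumed import path for the kubelet cpuset helpers at the time of this commit.
	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Before this commit: reserved := cpuset.NewCPUSet(0, 1, 6, 7)
	reserved := cpuset.New(0, 1, 6, 7)

	// Existing CPUSet methods keep working unchanged.
	fmt.Println(reserved.String()) // "0-1,6-7"
	fmt.Println(reserved.Size())   // 4
}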


@@ -465,7 +465,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName)
}
func getReservedCPUs(machineInfo *cadvisorapi.MachineInfo, cpus string) (cpuset.CPUSet, error) {
-emptyCPUSet := cpuset.NewCPUSet()
+emptyCPUSet := cpuset.New()
if cpus == "" {
return emptyCPUSet, nil


@@ -216,7 +216,7 @@ func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet,
topo: topo,
details: topo.CPUDetails.KeepOnly(availableCPUs),
numCPUsNeeded: numCPUs,
-result: cpuset.NewCPUSet(),
+result: cpuset.New(),
}
if topo.NumSockets >= topo.NumNUMANodes {
@@ -372,7 +372,7 @@ func (a *cpuAccumulator) takeFullCores() {
func (a *cpuAccumulator) takeRemainingCPUs() {
for _, cpu := range a.sortAvailableCPUs() {
klog.V(4).InfoS("takeRemainingCPUs: claiming CPU", "cpu", cpu)
-a.take(cpuset.NewCPUSet(cpu))
+a.take(cpuset.New(cpu))
if a.isSatisfied() {
return
}
@@ -453,7 +453,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
return acc.result, nil
}
if acc.isFailed() {
-return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request")
+return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request")
}
// Algorithm: topology-aware best-fit
@@ -485,7 +485,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
return acc.result, nil
}
-return cpuset.NewCPUSet(), fmt.Errorf("failed to allocate cpus")
+return cpuset.New(), fmt.Errorf("failed to allocate cpus")
}
// takeByTopologyNUMADistributed returns a CPUSet of size 'numCPUs'.
@@ -565,7 +565,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
return acc.result, nil
}
if acc.isFailed() {
-return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request")
+return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request")
}
// Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'.
@@ -763,13 +763,13 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
// If we haven't allocated all of our CPUs at this point, then something
// went wrong in our accounting and we should error out.
if acc.numCPUsNeeded > 0 {
-return cpuset.NewCPUSet(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded)
+return cpuset.New(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded)
}
// Likewise, if we have allocated too many CPUs at this point, then something
// went wrong in our accounting and we should error out.
if acc.numCPUsNeeded < 0 {
-return cpuset.NewCPUSet(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded)
+return cpuset.New(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded)
}
// Otherwise, return the result


@@ -35,31 +35,31 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) {
{
"single socket HT, 1 socket free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0},
},
{
"single socket HT, 0 sockets free",
topoSingleSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
+cpuset.New(1, 2, 3, 4, 5, 6, 7),
[]int{},
},
{
"dual socket HT, 2 sockets free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 1},
},
{
"dual socket HT, 1 socket free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{1},
},
{
"dual socket HT, 0 sockets free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
+cpuset.New(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
{
@@ -135,31 +135,31 @@ func TestCPUAccumulatorFreeNUMANodes(t *testing.T) {
{
"single socket HT, 1 NUMA node free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0},
},
{
"single socket HT, 0 NUMA Node free",
topoSingleSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
+cpuset.New(1, 2, 3, 4, 5, 6, 7),
[]int{},
},
{
"dual socket HT, 2 NUMA Node free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 1},
},
{
"dual socket HT, 1 NUMA Node free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{1},
},
{
"dual socket HT, 0 NUMA node free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
+cpuset.New(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
{
@@ -286,49 +286,49 @@ func TestCPUAccumulatorFreeCores(t *testing.T) {
{
"single socket HT, 4 cores free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 1, 2, 3},
},
{
"single socket HT, 3 cores free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 4, 5, 6),
+cpuset.New(0, 1, 2, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 3 cores free (1 partially consumed)",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6),
+cpuset.New(0, 1, 2, 3, 4, 5, 6),
[]int{0, 1, 2},
},
{
"single socket HT, 0 cores free",
topoSingleSocketHT,
-cpuset.NewCPUSet(),
+cpuset.New(),
[]int{},
},
{
"single socket HT, 0 cores free (4 partially consumed)",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3),
+cpuset.New(0, 1, 2, 3),
[]int{},
},
{
"dual socket HT, 6 cores free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 2, 4, 1, 3, 5},
},
{
"dual socket HT, 5 cores free (1 consumed from socket 0)",
topoDualSocketHT,
-cpuset.NewCPUSet(2, 1, 3, 4, 5, 7, 8, 9, 10, 11),
+cpuset.New(2, 1, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 4, 1, 3, 5},
},
{
"dual socket HT, 4 cores free (1 consumed from each socket)",
topoDualSocketHT,
-cpuset.NewCPUSet(2, 3, 4, 5, 8, 9, 10, 11),
+cpuset.New(2, 3, 4, 5, 8, 9, 10, 11),
[]int{2, 4, 3, 5},
},
}
@@ -354,37 +354,37 @@ func TestCPUAccumulatorFreeCPUs(t *testing.T) {
{
"single socket HT, 8 cpus free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0, 4, 1, 5, 2, 6, 3, 7},
},
{
"single socket HT, 5 cpus free",
topoSingleSocketHT,
-cpuset.NewCPUSet(3, 4, 5, 6, 7),
+cpuset.New(3, 4, 5, 6, 7),
[]int{4, 5, 6, 3, 7},
},
{
"dual socket HT, 12 cpus free",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 11 cpus free",
topoDualSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"dual socket HT, 10 cpus free",
topoDualSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
[]int{2, 8, 4, 10, 1, 7, 3, 9, 5, 11},
},
{
"triple socket HT, 12 cpus free",
topoTripleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13),
+cpuset.New(0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13),
[]int{12, 13, 0, 1, 2, 3, 6, 7, 8, 9, 10, 11},
},
}
@@ -413,8 +413,8 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take 0 cpus from a single socket HT, require 1",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
-[]cpuset.CPUSet{cpuset.NewCPUSet()},
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
+[]cpuset.CPUSet{cpuset.New()},
1,
false,
false,
@@ -422,8 +422,8 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take 0 cpus from a single socket HT, require 1, none available",
topoSingleSocketHT,
-cpuset.NewCPUSet(),
-[]cpuset.CPUSet{cpuset.NewCPUSet()},
+cpuset.New(),
+[]cpuset.CPUSet{cpuset.New()},
1,
false,
true,
@@ -431,8 +431,8 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take 1 cpu from a single socket HT, require 1",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
-[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
+[]cpuset.CPUSet{cpuset.New(0)},
1,
true,
false,
@@ -440,8 +440,8 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take 1 cpu from a single socket HT, require 2",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
-[]cpuset.CPUSet{cpuset.NewCPUSet(0)},
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
+[]cpuset.CPUSet{cpuset.New(0)},
2,
false,
false,
@@ -449,8 +449,8 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take 2 cpu from a single socket HT, require 4, expect failed",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2),
-[]cpuset.CPUSet{cpuset.NewCPUSet(0), cpuset.NewCPUSet(1)},
+cpuset.New(0, 1, 2),
+[]cpuset.CPUSet{cpuset.New(0), cpuset.New(1)},
4,
false,
true,
@@ -458,16 +458,16 @@ func TestCPUAccumulatorTake(t *testing.T) {
{
"take all cpus one at a time from a single socket HT, require 8",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
[]cpuset.CPUSet{
-cpuset.NewCPUSet(0),
-cpuset.NewCPUSet(1),
-cpuset.NewCPUSet(2),
-cpuset.NewCPUSet(3),
-cpuset.NewCPUSet(4),
-cpuset.NewCPUSet(5),
-cpuset.NewCPUSet(6),
-cpuset.NewCPUSet(7),
+cpuset.New(0),
+cpuset.New(1),
+cpuset.New(2),
+cpuset.New(3),
+cpuset.New(4),
+cpuset.New(5),
+cpuset.New(6),
+cpuset.New(7),
},
8,
true,
@@ -520,66 +520,66 @@ func commonTakeByTopologyTestCases(t *testing.T) []takeByTopologyTestCase {
{
"take more cpus than are available from single socket with HT",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 2, 4, 6),
+cpuset.New(0, 2, 4, 6),
5,
"not enough cpus available to satisfy request",
-cpuset.NewCPUSet(),
+cpuset.New(),
},
{
"take zero cpus from single socket with HT",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
0,
"",
-cpuset.NewCPUSet(),
+cpuset.New(),
},
{
"take one cpu from single socket with HT",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
1,
"",
-cpuset.NewCPUSet(0),
+cpuset.New(0),
},
{
"take one cpu from single socket with HT, some cpus are taken",
topoSingleSocketHT,
-cpuset.NewCPUSet(1, 3, 5, 6, 7),
+cpuset.New(1, 3, 5, 6, 7),
1,
"",
-cpuset.NewCPUSet(6),
+cpuset.New(6),
},
{
"take two cpus from single socket with HT",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
2,
"",
-cpuset.NewCPUSet(0, 4),
+cpuset.New(0, 4),
},
{
"take all cpus from single socket with HT",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
8,
"",
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
},
{
"take two cpus from single socket with HT, only one core totally free",
topoSingleSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 6),
+cpuset.New(0, 1, 2, 3, 6),
2,
"",
-cpuset.NewCPUSet(2, 6),
+cpuset.New(2, 6),
},
{
"take a socket of cpus from dual socket with HT",
topoDualSocketHT,
-cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
6,
"",
-cpuset.NewCPUSet(0, 2, 4, 6, 8, 10),
+cpuset.New(0, 2, 4, 6, 8, 10),
},
{
"take a socket of cpus from dual socket with multi-numa-per-socket with HT",
@@ -630,10 +630,10 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) {
{
"take one cpu from dual socket with HT - core from Socket 0",
topoDualSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
-cpuset.NewCPUSet(2),
+cpuset.New(2),
},
{
"allocate 4 full cores with 3 coming from the first NUMA node (filling it up) and 1 coming from the second NUMA node",
@@ -767,20 +767,20 @@ func TestTakeByTopologyNUMADistributed(t *testing.T) {
{
"take one cpu from dual socket with HT - core from Socket 0",
topoDualSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
1,
"",
-cpuset.NewCPUSet(1),
+cpuset.New(1),
},
{
"take one cpu from dual socket with HT - core from Socket 0 - cpuGroupSize 2",
topoDualSocketHT,
-cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
2,
"",
-cpuset.NewCPUSet(2),
+cpuset.New(2),
},
{
"allocate 13 full cores distributed across the first 2 NUMA nodes",


@@ -121,7 +121,7 @@ func (p *mockPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string]
}
func (p *mockPolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet {
-return cpuset.NewCPUSet()
+return cpuset.New()
}
type mockRuntimeService struct {
@@ -228,7 +228,7 @@ func TestCPUManagerAdd(t *testing.T) {
},
},
0,
-cpuset.NewCPUSet(),
+cpuset.New(),
topologymanager.NewFakeManager(),
nil)
testCases := []struct {
@@ -243,7 +243,7 @@ func TestCPUManagerAdd(t *testing.T) {
description: "cpu manager add - no error", description: "cpu manager add - no error",
updateErr: nil, updateErr: nil,
policy: testPolicy, policy: testPolicy,
expCPUSet: cpuset.NewCPUSet(3, 4), expCPUSet: cpuset.New(3, 4),
expAllocateErr: nil, expAllocateErr: nil,
expAddContainerErr: nil, expAddContainerErr: nil,
}, },
@@ -253,7 +253,7 @@ func TestCPUManagerAdd(t *testing.T) {
policy: &mockPolicy{ policy: &mockPolicy{
err: fmt.Errorf("fake reg error"), err: fmt.Errorf("fake reg error"),
}, },
expCPUSet: cpuset.NewCPUSet(1, 2, 3, 4), expCPUSet: cpuset.New(1, 2, 3, 4),
expAllocateErr: fmt.Errorf("fake reg error"), expAllocateErr: fmt.Errorf("fake reg error"),
expAddContainerErr: nil, expAddContainerErr: nil,
}, },
@@ -264,7 +264,7 @@ func TestCPUManagerAdd(t *testing.T) {
policy: testCase.policy,
state: &mockState{
assignments: state.ContainerCPUAssignments{},
-defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
+defaultCPUSet: cpuset.New(1, 2, 3, 4),
},
lastUpdateState: state.NewMemoryState(),
containerRuntime: mockRuntimeService{
@@ -316,71 +316,71 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"100m", "100m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet()},
+cpuset.New()},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
},
{
description: "Equal Number of Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"4000m", "4000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
},
{
description: "More Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"6000m", "6000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
+cpuset.New(0, 4, 1, 5, 2, 6)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
},
{
description: "Less Init Container Guaranteed CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
[]struct{ request, limit string }{{"2000m", "2000m"}},
[]struct{ request, limit string }{{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4)},
+cpuset.New(0, 4)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
},
{
description: "Multi Init Container Equal CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
@@ -390,17 +390,17 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4),
-cpuset.NewCPUSet(0, 4)},
+cpuset.New(0, 4),
+cpuset.New(0, 4)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4)},
+cpuset.New(0, 4)},
},
{
description: "Multi Init Container Less CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
@@ -410,17 +410,17 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
[]struct{ request, limit string }{
{"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5),
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5),
+cpuset.New(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4)},
+cpuset.New(0, 4)},
},
{
description: "Multi Init Container More CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
@@ -430,17 +430,17 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
[]struct{ request, limit string }{
{"4000m", "4000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4),
-cpuset.NewCPUSet(0, 4)},
+cpuset.New(0, 4),
+cpuset.New(0, 4)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4, 1, 5)},
},
{
description: "Multi Init Container Increasing CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID"},
pod: makeMultiContainerPod(
@@ -450,17 +450,17 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
[]struct{ request, limit string }{
{"6000m", "6000m"}}),
expInitCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4),
-cpuset.NewCPUSet(0, 4, 1, 5)},
+cpuset.New(0, 4),
+cpuset.New(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{
-cpuset.NewCPUSet(0, 4, 1, 5, 2, 6)},
+cpuset.New(0, 4, 1, 5, 2, 6)},
},
{
description: "Multi Init, Multi App Container Split CPUs",
topo: topoSingleSocketHT,
numReservedCPUs: 0,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
containerIDs: []string{"appFakeID-1", "appFakeID-2"},
pod: makeMultiContainerPod(
@@ -471,16 +471,16 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
{"2000m", "2000m"}, {"2000m", "2000m"},
{"2000m", "2000m"}}), {"2000m", "2000m"}}),
expInitCSets: []cpuset.CPUSet{ expInitCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4), cpuset.New(0, 4),
cpuset.NewCPUSet(0, 4, 1, 5)}, cpuset.New(0, 4, 1, 5)},
expCSets: []cpuset.CPUSet{ expCSets: []cpuset.CPUSet{
cpuset.NewCPUSet(0, 4), cpuset.New(0, 4),
cpuset.NewCPUSet(1, 5)}, cpuset.New(1, 5)},
}, },
} }
for _, testCase := range testCases { for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil)
mockState := &mockState{ mockState := &mockState{
assignments: testCase.stAssignments, assignments: testCase.stAssignments,
@@ -512,7 +512,7 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
testCase.expInitCSets,
testCase.expCSets...)
-cumCSet := cpuset.NewCPUSet()
+cumCSet := cpuset.New()
for i := range containers {
err := mgr.Allocate(testCase.pod, &containers[i])
@@ -635,7 +635,7 @@ func TestCPUManagerGenerate(t *testing.T) {
}
defer os.RemoveAll(sDir)
-mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.NewCPUSet(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
+mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.New(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
if testCase.expectedError != nil {
if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
@@ -671,7 +671,7 @@ func TestCPUManagerRemove(t *testing.T) {
},
state: &mockState{
assignments: state.ContainerCPUAssignments{},
-defaultCPUSet: cpuset.NewCPUSet(),
+defaultCPUSet: cpuset.New(),
},
lastUpdateState: state.NewMemoryState(),
containerRuntime: mockRuntimeService{},
@@ -722,7 +722,7 @@ func TestReconcileState(t *testing.T) {
},
},
0,
-cpuset.NewCPUSet(),
+cpuset.New(),
topologymanager.NewFakeManager(),
nil)
@@ -775,18 +775,18 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
expectSucceededContainerName: "fakeContainerName",
expectFailedContainerName: "",
},
@@ -823,18 +823,18 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
expectSucceededContainerName: "fakeContainerName",
expectFailedContainerName: "",
},
@@ -860,11 +860,11 @@ func TestReconcileState(t *testing.T) {
pspFound: false,
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(),
+stDefaultCPUSet: cpuset.New(),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{},
-expectStDefaultCPUSet: cpuset.NewCPUSet(),
+expectStDefaultCPUSet: cpuset.New(),
expectSucceededContainerName: "",
expectFailedContainerName: "",
},
@@ -897,11 +897,11 @@ func TestReconcileState(t *testing.T) {
pspFound: true,
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(),
+stDefaultCPUSet: cpuset.New(),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{},
-expectStDefaultCPUSet: cpuset.NewCPUSet(),
+expectStDefaultCPUSet: cpuset.New(),
expectSucceededContainerName: "",
expectFailedContainerName: "fakeContainerName",
},
@@ -938,18 +938,18 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(),
+"fakeContainerName": cpuset.New(),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(),
+"fakeContainerName": cpuset.New(),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
expectSucceededContainerName: "",
expectFailedContainerName: "fakeContainerName",
},
@@ -986,18 +986,18 @@ func TestReconcileState(t *testing.T) {
updateErr: fmt.Errorf("fake container update error"),
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
expectSucceededContainerName: "",
expectFailedContainerName: "fakeContainerName",
},
@@ -1034,21 +1034,21 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
"secondfakePodUID": map[string]cpuset.CPUSet{
-"secondfakeContainerName": cpuset.NewCPUSet(3, 4),
+"secondfakeContainerName": cpuset.New(3, 4),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7),
+stDefaultCPUSet: cpuset.New(5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(),
+lastUpdateStDefaultCPUSet: cpuset.New(),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
expectSucceededContainerName: "fakeContainerName",
expectFailedContainerName: "",
},
@@ -1085,22 +1085,22 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7),
+stDefaultCPUSet: cpuset.New(5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7),
+lastUpdateStDefaultCPUSet: cpuset.New(5, 6, 7),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(5, 6, 7),
expectSucceededContainerName: "fakeContainerName",
expectFailedContainerName: "",
},
@@ -1137,22 +1137,22 @@ func TestReconcileState(t *testing.T) {
updateErr: nil,
stAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
lastUpdateStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(3, 4),
+"fakeContainerName": cpuset.New(3, 4),
},
},
-lastUpdateStDefaultCPUSet: cpuset.NewCPUSet(1, 2, 5, 6, 7),
+lastUpdateStDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7),
expectStAssignments: state.ContainerCPUAssignments{
"fakePodUID": map[string]cpuset.CPUSet{
-"fakeContainerName": cpuset.NewCPUSet(1, 2),
+"fakeContainerName": cpuset.New(1, 2),
},
},
-expectStDefaultCPUSet: cpuset.NewCPUSet(3, 4, 5, 6, 7),
+expectStDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
expectSucceededContainerName: "fakeContainerName",
expectFailedContainerName: "",
},
@@ -1241,7 +1241,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
},
},
1,
-cpuset.NewCPUSet(0),
+cpuset.New(0),
topologymanager.NewFakeManager(),
nil)
testCases := []struct {
@@ -1256,7 +1256,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
description: "cpu manager add - no error", description: "cpu manager add - no error",
updateErr: nil, updateErr: nil,
policy: testPolicy, policy: testPolicy,
expCPUSet: cpuset.NewCPUSet(0, 3), expCPUSet: cpuset.New(0, 3),
expAllocateErr: nil, expAllocateErr: nil,
expAddContainerErr: nil, expAddContainerErr: nil,
}, },
@@ -1267,7 +1267,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
policy: testCase.policy,
state: &mockState{
assignments: state.ContainerCPUAssignments{},
-defaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),
+defaultCPUSet: cpuset.New(0, 1, 2, 3),
},
lastUpdateState: state.NewMemoryState(),
containerRuntime: mockRuntimeService{
@@ -1355,7 +1355,7 @@ func TestCPUManagerHandlePolicyOptions(t *testing.T) {
}
defer os.RemoveAll(sDir)
-_, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.NewCPUSet(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
+_, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.New(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
if err == nil {
t.Errorf("Expected error, but NewManager succeeded")
}
@@ -1382,7 +1382,7 @@ func TestCPUManagerGetAllocatableCPUs(t *testing.T) {
},
},
1,
-cpuset.NewCPUSet(0),
+cpuset.New(0),
topologymanager.NewFakeManager(),
nil)
@@ -1394,12 +1394,12 @@ func TestCPUManagerGetAllocatableCPUs(t *testing.T) {
{
description: "None Policy",
policy: nonePolicy,
-expAllocatableCPUs: cpuset.NewCPUSet(),
+expAllocatableCPUs: cpuset.New(),
},
{
description: "Static Policy",
policy: staticPolicy,
-expAllocatableCPUs: cpuset.NewCPUSet(1, 2, 3),
+expAllocatableCPUs: cpuset.New(1, 2, 3),
},
}
for _, testCase := range testCases {
@@ -1408,7 +1408,7 @@ func TestCPUManagerGetAllocatableCPUs(t *testing.T) {
activePods: func() []*v1.Pod { return nil },
state: &mockState{
assignments: state.ContainerCPUAssignments{},
-defaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),
+defaultCPUSet: cpuset.New(0, 1, 2, 3),
},
lastUpdateState: state.NewMemoryState(),
containerMap: containermap.NewContainerMap(),


@@ -72,5 +72,5 @@ func (p *nonePolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string]
// CAN get exclusive access to core(s).
// Hence, we return empty set here: no cpus are assignable according to above definition with this policy.
func (p *nonePolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet {
-return cpuset.NewCPUSet()
+return cpuset.New()
}


@@ -37,7 +37,7 @@ func TestNonePolicyAllocate(t *testing.T) {
st := &mockState{ st := &mockState{
assignments: state.ContainerCPUAssignments{}, assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), defaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
} }
testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m") testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m")
@@ -54,7 +54,7 @@ func TestNonePolicyRemove(t *testing.T) {
st := &mockState{ st := &mockState{
assignments: state.ContainerCPUAssignments{}, assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), defaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
} }
testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m") testPod := makePod("fakePod", "fakeContainer", "1000m", "1000m")
@@ -78,7 +78,7 @@ func TestNonePolicyGetAllocatableCPUs(t *testing.T) {
st := &mockState{ st := &mockState{
assignments: state.ContainerCPUAssignments{}, assignments: state.ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(cpuIDs...), defaultCPUSet: cpuset.New(cpuIDs...),
} }
cpus := policy.GetAllocatableCPUs(st) cpus := policy.GetAllocatableCPUs(st)


@@ -245,7 +245,7 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
}
// If no cpuset exists for cpusToReuse by this pod yet, create one.
if _, ok := p.cpusToReuse[string(pod.UID)]; !ok {
-p.cpusToReuse[string(pod.UID)] = cpuset.NewCPUSet()
+p.cpusToReuse[string(pod.UID)] = cpuset.New()
}
// Check if the container is an init container.
// If so, add its cpuset to the cpuset of reusable CPUs for any new allocations.
@@ -316,7 +316,7 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
// getAssignedCPUsOfSiblings returns assigned cpus of given container's siblings(all containers other than the given container) in the given pod `podUID`.
func getAssignedCPUsOfSiblings(s state.State, podUID string, containerName string) cpuset.CPUSet {
assignments := s.GetCPUAssignments()
-cset := cpuset.NewCPUSet()
+cset := cpuset.New()
for name, cpus := range assignments[podUID] {
if containerName == name {
continue
@@ -344,7 +344,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs)
// If there are aligned CPUs in numaAffinity, attempt to take those first.
-result := cpuset.NewCPUSet()
+result := cpuset.New()
if numaAffinity != nil {
alignedCPUs := p.getAlignedCPUs(numaAffinity, allocatableCPUs)
@@ -355,7 +355,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
alignedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc)
if err != nil {
-return cpuset.NewCPUSet(), err
+return cpuset.New(), err
}
result = result.Union(alignedCPUs)
@@ -364,7 +364,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
// Get any remaining CPUs from what's leftover after attempting to grab aligned ones.
remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result), numCPUs-result.Size())
if err != nil {
-return cpuset.NewCPUSet(), err
+return cpuset.New(), err
}
result = result.Union(remainingCPUs)
@@ -486,7 +486,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
return nil
}
-assignedCPUs := cpuset.NewCPUSet()
+assignedCPUs := cpuset.New()
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
requestedByContainer := p.guaranteedCPUs(pod, &container)
// Short circuit to regenerate the same hints if there are already
@@ -616,7 +616,7 @@ func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, mi
// getAlignedCPUs return set of aligned CPUs based on numa affinity mask and configured policy options.
func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableCPUs cpuset.CPUSet) cpuset.CPUSet {
-alignedCPUs := cpuset.NewCPUSet()
+alignedCPUs := cpuset.New()
numaBits := numaAffinity.GetBits()
// If align-by-socket policy option is enabled, NUMA based hint is expanded to


@@ -68,7 +68,7 @@ func (spt staticPolicyTest) PseudoClone() staticPolicyTest {
}
func TestStaticPolicyName(t *testing.T) {
-policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+policy, _ := NewStaticPolicy(topoSingleSocketHT, 1, cpuset.New(), topologymanager.NewFakeManager(), nil)
policyName := policy.Name()
if policyName != "static" {
@@ -84,26 +84,26 @@ func TestStaticPolicyStart(t *testing.T) {
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
-"0": cpuset.NewCPUSet(0),
+"0": cpuset.New(0),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
-expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+expCSet: cpuset.New(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
description: "empty cpuset",
topo: topoDualSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(),
-expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+stDefaultCPUSet: cpuset.New(),
+expCSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
},
{
description: "reserved cores 0 & 6 are not present in available cpuset",
topo: topoDualSocketHT,
numReservedCPUs: 2,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1),
+stDefaultCPUSet: cpuset.New(0, 1),
expErr: fmt.Errorf("not all reserved cpus: \"0,6\" are present in defaultCpuSet: \"0-1\""),
},
{
@@ -111,10 +111,10 @@ func TestStaticPolicyStart(t *testing.T) {
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
-"0": cpuset.NewCPUSet(0, 1, 2),
+"0": cpuset.New(0, 1, 2),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
+stDefaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expErr: fmt.Errorf("pod: fakePod, container: 0 cpuset: \"0-2\" overlaps with default cpuset \"2-11\""),
},
{
@@ -122,11 +122,11 @@ func TestStaticPolicyStart(t *testing.T) {
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
-"0": cpuset.NewCPUSet(0, 1, 2),
-"1": cpuset.NewCPUSet(3, 4),
+"0": cpuset.New(0, 1, 2),
+"1": cpuset.New(3, 4),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10, 11, 12),
+stDefaultCPUSet: cpuset.New(5, 6, 7, 8, 9, 10, 11, 12),
expErr: fmt.Errorf("current set of available CPUs \"0-11\" doesn't match with CPUs in state \"0-12\""),
},
{
@@ -134,17 +134,17 @@ func TestStaticPolicyStart(t *testing.T) {
topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
-"0": cpuset.NewCPUSet(0, 1, 2),
-"1": cpuset.NewCPUSet(3, 4),
+"0": cpuset.New(0, 1, 2),
+"1": cpuset.New(3, 4),
},
},
-stDefaultCPUSet: cpuset.NewCPUSet(5, 6, 7, 8, 9, 10),
+stDefaultCPUSet: cpuset.New(5, 6, 7, 8, 9, 10),
expErr: fmt.Errorf("current set of available CPUs \"0-11\" doesn't match with CPUs in state \"0-10\""),
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
-p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil)
+p, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil)
policy := p.(*staticPolicy)
st := &mockState{
assignments: testCase.stAssignments,
@@ -201,11 +201,11 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
-stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
+stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
-expCSet: cpuset.NewCPUSet(),
+expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
@@ -213,14 +213,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), "fakeContainer100": cpuset.New(2, 3, 6, 7),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5), stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"), pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 5), expCSet: cpuset.New(1, 5),
}, },
{ {
description: "GuPodMultipleCores, SingleSocketHT, ExpectSameAllocation", description: "GuPodMultipleCores, SingleSocketHT, ExpectSameAllocation",
@@ -228,14 +228,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer3": cpuset.NewCPUSet(2, 3, 6, 7), "fakeContainer3": cpuset.New(2, 3, 6, 7),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5), stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "4000m", "4000m"), pod: makePod("fakePod", "fakeContainer3", "4000m", "4000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 3, 6, 7), expCSet: cpuset.New(2, 3, 6, 7),
}, },
{ {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket", description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket",
@@ -243,14 +243,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2), "fakeContainer100": cpuset.New(2),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"), pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 5, 7, 9, 11), expCSet: cpuset.New(1, 3, 5, 7, 9, 11),
}, },
{ {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocThreeCores", description: "GuPodMultipleCores, DualSocketHT, ExpectAllocThreeCores",
@@ -258,14 +258,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 5), "fakeContainer100": cpuset.New(1, 5),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 2, 3, 4, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 2, 3, 4, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"), pod: makePod("fakePod", "fakeContainer3", "6000m", "6000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 3, 4, 8, 9, 10), expCSet: cpuset.New(2, 3, 4, 8, 9, 10),
}, },
{ {
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocOneSocket", description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocOneSocket",
@@ -273,14 +273,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(), "fakeContainer100": cpuset.New(),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"), pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5, 6, 7), expCSet: cpuset.New(4, 5, 6, 7),
}, },
{ {
description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocFourCores", description: "GuPodMultipleCores, DualSocketNoHT, ExpectAllocFourCores",
@@ -288,14 +288,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(4, 5), "fakeContainer100": cpuset.New(4, 5),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 3, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"), pod: makePod("fakePod", "fakeContainer1", "4000m", "4000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 6, 7), expCSet: cpuset.New(1, 3, 6, 7),
}, },
{ {
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocketOneCore", description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocketOneCore",
@@ -303,36 +303,36 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2), "fakeContainer100": cpuset.New(2),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer3", "8000m", "8000m"), pod: makePod("fakePod", "fakeContainer3", "8000m", "8000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 3, 4, 5, 7, 9, 10, 11), expCSet: cpuset.New(1, 3, 4, 5, 7, 9, 10, 11),
}, },
{ {
description: "NonGuPod, SingleSocketHT, NoAlloc", description: "NonGuPod, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer1", "1000m", "2000m"), pod: makePod("fakePod", "fakeContainer1", "1000m", "2000m"),
expErr: nil, expErr: nil,
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
{ {
description: "GuPodNonIntegerCore, SingleSocketHT, NoAlloc", description: "GuPodNonIntegerCore, SingleSocketHT, NoAlloc",
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer4", "977m", "977m"), pod: makePod("fakePod", "fakeContainer4", "977m", "977m"),
expErr: nil, expErr: nil,
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
{ {
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError", description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
@@ -340,14 +340,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 2, 3, 4, 5, 6), "fakeContainer100": cpuset.New(1, 2, 3, 4, 5, 6),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 7), stDefaultCPUSet: cpuset.New(0, 7),
pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"), pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"), expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
{ {
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError", description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
@@ -355,14 +355,14 @@ func TestStaticPolicyAdd(t *testing.T) {
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(1, 2, 3), "fakeContainer100": cpuset.New(1, 2, 3),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"), pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"), expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
{ {
// All the CPUs from Socket 0 are available. Some CPUs from each // All the CPUs from Socket 0 are available. Some CPUs from each
@@ -372,10 +372,10 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoQuadSocketFourWayHT, topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(3, 11, 4, 5, 6, 7), "fakeContainer100": cpuset.New(3, 11, 4, 5, 6, 7),
}, },
}, },
stDefaultCPUSet: largeTopoCPUSet.Difference(cpuset.NewCPUSet(3, 11, 4, 5, 6, 7)), stDefaultCPUSet: largeTopoCPUSet.Difference(cpuset.New(3, 11, 4, 5, 6, 7)),
pod: makePod("fakePod", "fakeContainer5", "72000m", "72000m"), pod: makePod("fakePod", "fakeContainer5", "72000m", "72000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
@@ -388,15 +388,15 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoQuadSocketFourWayHT, topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51, "fakeContainer100": largeTopoCPUSet.Difference(cpuset.New(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51,
53, 173, 113, 233, 54, 61)), 53, 173, 113, 233, 54, 61)),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51, 53, 173, 113, 233, 54, 61), stDefaultCPUSet: cpuset.New(1, 25, 13, 38, 2, 9, 11, 35, 23, 48, 12, 51, 53, 173, 113, 233, 54, 61),
pod: makePod("fakePod", "fakeCcontainer5", "12000m", "12000m"), pod: makePod("fakePod", "fakeCcontainer5", "12000m", "12000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(1, 25, 13, 38, 11, 35, 23, 48, 53, 173, 113, 233), expCSet: cpuset.New(1, 25, 13, 38, 11, 35, 23, 48, 53, 173, 113, 233),
}, },
{ {
// All CPUs from Socket 1, 1 full core and some partial cores are available. // All CPUs from Socket 1, 1 full core and some partial cores are available.
@@ -405,16 +405,16 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoQuadSocketFourWayHT, topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53, "fakeContainer100": largeTopoCPUSet.Difference(largeTopoSock1CPUSet.Union(cpuset.New(10, 34, 22, 47, 53,
173, 61, 181, 108, 228, 115, 235))), 173, 61, 181, 108, 228, 115, 235))),
}, },
}, },
stDefaultCPUSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47, 53, 173, 61, 181, 108, 228, stDefaultCPUSet: largeTopoSock1CPUSet.Union(cpuset.New(10, 34, 22, 47, 53, 173, 61, 181, 108, 228,
115, 235)), 115, 235)),
pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"), pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: largeTopoSock1CPUSet.Union(cpuset.NewCPUSet(10, 34, 22, 47)), expCSet: largeTopoSock1CPUSet.Union(cpuset.New(10, 34, 22, 47)),
}, },
{ {
// Only 7 CPUs are available. // Only 7 CPUs are available.
@@ -424,14 +424,14 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoQuadSocketFourWayHT, topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)), "fakeContainer100": largeTopoCPUSet.Difference(cpuset.New(10, 11, 53, 37, 55, 67, 52)),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52), stDefaultCPUSet: cpuset.New(10, 11, 53, 37, 55, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"), pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"), expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
} }
@@ -442,11 +442,11 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"), pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core expCSet: cpuset.New(4), // expect sibling of partial core
}, },
{ {
// Only partial cores are available in the entire system. // Only partial cores are available in the entire system.
@@ -455,14 +455,14 @@ func TestStaticPolicyAdd(t *testing.T) {
topo: topoQuadSocketFourWayHT, topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.NewCPUSet(10, 11, 53, 37, 55, 67, 52)), "fakeContainer100": largeTopoCPUSet.Difference(cpuset.New(10, 11, 53, 37, 55, 67, 52)),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(10, 11, 53, 67, 52), stDefaultCPUSet: cpuset.New(10, 11, 53, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "5000m", "5000m"), pod: makePod("fakePod", "fakeContainer5", "5000m", "5000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(10, 11, 53, 67, 52), expCSet: cpuset.New(10, 11, 53, 67, 52),
}, },
} }
@@ -476,11 +476,11 @@ func TestStaticPolicyAdd(t *testing.T) {
}, },
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"), pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: SMTAlignmentError{RequestedCPUs: 1, CpusPerCore: 2}, expErr: SMTAlignmentError{RequestedCPUs: 1, CpusPerCore: 2},
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), // reject allocation of sibling of partial core expCSet: cpuset.New(), // reject allocation of sibling of partial core
}, },
{ {
// test SMT-level != 2 - which is the default on x86_64 // test SMT-level != 2 - which is the default on x86_64
@@ -495,7 +495,7 @@ func TestStaticPolicyAdd(t *testing.T) {
pod: makePod("fakePod", "fakeContainer15", "15000m", "15000m"), pod: makePod("fakePod", "fakeContainer15", "15000m", "15000m"),
expErr: SMTAlignmentError{RequestedCPUs: 15, CpusPerCore: 4}, expErr: SMTAlignmentError{RequestedCPUs: 15, CpusPerCore: 4},
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
} }
newNUMAAffinity := func(bits ...int) bitmask.BitMask { newNUMAAffinity := func(bits ...int) bitmask.BitMask {
@@ -511,12 +511,12 @@ func TestStaticPolicyAdd(t *testing.T) {
}, },
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 11, 21, 22), stDefaultCPUSet: cpuset.New(2, 11, 21, 22),
pod: makePod("fakePod", "fakeContainer2", "2000m", "2000m"), pod: makePod("fakePod", "fakeContainer2", "2000m", "2000m"),
topologyHint: &topologymanager.TopologyHint{NUMANodeAffinity: newNUMAAffinity(0, 2), Preferred: true}, topologyHint: &topologymanager.TopologyHint{NUMANodeAffinity: newNUMAAffinity(0, 2), Preferred: true},
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 11), expCSet: cpuset.New(2, 11),
}, },
{ {
description: "Align by socket: false, cpu's are taken strictly from NUMA nodes in hint", description: "Align by socket: false, cpu's are taken strictly from NUMA nodes in hint",
@@ -526,12 +526,12 @@ func TestStaticPolicyAdd(t *testing.T) {
}, },
numReservedCPUs: 1, numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 11, 21, 22), stDefaultCPUSet: cpuset.New(2, 11, 21, 22),
pod: makePod("fakePod", "fakeContainer2", "2000m", "2000m"), pod: makePod("fakePod", "fakeContainer2", "2000m", "2000m"),
topologyHint: &topologymanager.TopologyHint{NUMANodeAffinity: newNUMAAffinity(0, 2), Preferred: true}, topologyHint: &topologymanager.TopologyHint{NUMANodeAffinity: newNUMAAffinity(0, 2), Preferred: true},
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(2, 21), expCSet: cpuset.New(2, 21),
}, },
} }
@@ -565,7 +565,7 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
if testCase.topologyHint != nil { if testCase.topologyHint != nil {
tm = topologymanager.NewFakeManagerWithHint(testCase.topologyHint) tm = topologymanager.NewFakeManagerWithHint(testCase.topologyHint)
} }
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), tm, testCase.options) policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), tm, testCase.options)
st := &mockState{ st := &mockState{
assignments: testCase.stAssignments, assignments: testCase.stAssignments,
@@ -628,15 +628,15 @@ func TestStaticPolicyReuseCPUs(t *testing.T) {
{"2000m", "2000m"}}), // 0, 4 {"2000m", "2000m"}}), // 0, 4
containerName: "initContainer-0", containerName: "initContainer-0",
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
}, },
expCSetAfterAlloc: cpuset.NewCPUSet(2, 3, 6, 7), expCSetAfterAlloc: cpuset.New(2, 3, 6, 7),
expCSetAfterRemove: cpuset.NewCPUSet(1, 2, 3, 5, 6, 7), expCSetAfterRemove: cpuset.New(1, 2, 3, 5, 6, 7),
}, },
} }
for _, testCase := range testCases { for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil)
st := &mockState{ st := &mockState{
assignments: testCase.stAssignments, assignments: testCase.stAssignments,
@@ -676,11 +676,11 @@ func TestStaticPolicyRemove(t *testing.T) {
containerName: "fakeContainer1", containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 2, 3), "fakeContainer1": cpuset.New(1, 2, 3),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(4, 5, 6, 7), stDefaultCPUSet: cpuset.New(4, 5, 6, 7),
expCSet: cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), expCSet: cpuset.New(1, 2, 3, 4, 5, 6, 7),
}, },
{ {
description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty", description: "SingleSocketHT, DeAllocOneContainer, BeginEmpty",
@@ -689,12 +689,12 @@ func TestStaticPolicyRemove(t *testing.T) {
containerName: "fakeContainer1", containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 2, 3), "fakeContainer1": cpuset.New(1, 2, 3),
"fakeContainer2": cpuset.NewCPUSet(4, 5, 6, 7), "fakeContainer2": cpuset.New(4, 5, 6, 7),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(), stDefaultCPUSet: cpuset.New(),
expCSet: cpuset.NewCPUSet(1, 2, 3), expCSet: cpuset.New(1, 2, 3),
}, },
{ {
description: "SingleSocketHT, DeAllocTwoContainer", description: "SingleSocketHT, DeAllocTwoContainer",
@@ -703,12 +703,12 @@ func TestStaticPolicyRemove(t *testing.T) {
containerName: "fakeContainer1", containerName: "fakeContainer1",
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 3, 5), "fakeContainer1": cpuset.New(1, 3, 5),
"fakeContainer2": cpuset.NewCPUSet(2, 4), "fakeContainer2": cpuset.New(2, 4),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(6, 7), stDefaultCPUSet: cpuset.New(6, 7),
expCSet: cpuset.NewCPUSet(1, 3, 5, 6, 7), expCSet: cpuset.New(1, 3, 5, 6, 7),
}, },
{ {
description: "SingleSocketHT, NoDeAlloc", description: "SingleSocketHT, NoDeAlloc",
@@ -717,16 +717,16 @@ func TestStaticPolicyRemove(t *testing.T) {
containerName: "fakeContainer2", containerName: "fakeContainer2",
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer1": cpuset.NewCPUSet(1, 3, 5), "fakeContainer1": cpuset.New(1, 3, 5),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(2, 4, 6, 7), stDefaultCPUSet: cpuset.New(2, 4, 6, 7),
expCSet: cpuset.NewCPUSet(2, 4, 6, 7), expCSet: cpuset.New(2, 4, 6, 7),
}, },
} }
for _, testCase := range testCases { for _, testCase := range testCases {
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil)
st := &mockState{ st := &mockState{
assignments: testCase.stAssignments, assignments: testCase.stAssignments,
@@ -761,62 +761,62 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
description: "Request 2 CPUs, No BitMask", description: "Request 2 CPUs, No BitMask",
topo: topoDualSocketHT, topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
numRequested: 2, numRequested: 2,
socketMask: nil, socketMask: nil,
expCSet: cpuset.NewCPUSet(0, 6), expCSet: cpuset.New(0, 6),
}, },
{ {
description: "Request 2 CPUs, BitMask on Socket 0", description: "Request 2 CPUs, BitMask on Socket 0",
topo: topoDualSocketHT, topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
numRequested: 2, numRequested: 2,
socketMask: func() bitmask.BitMask { socketMask: func() bitmask.BitMask {
mask, _ := bitmask.NewBitMask(0) mask, _ := bitmask.NewBitMask(0)
return mask return mask
}(), }(),
expCSet: cpuset.NewCPUSet(0, 6), expCSet: cpuset.New(0, 6),
}, },
{ {
description: "Request 2 CPUs, BitMask on Socket 1", description: "Request 2 CPUs, BitMask on Socket 1",
topo: topoDualSocketHT, topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
numRequested: 2, numRequested: 2,
socketMask: func() bitmask.BitMask { socketMask: func() bitmask.BitMask {
mask, _ := bitmask.NewBitMask(1) mask, _ := bitmask.NewBitMask(1)
return mask return mask
}(), }(),
expCSet: cpuset.NewCPUSet(1, 7), expCSet: cpuset.New(1, 7),
}, },
{ {
description: "Request 8 CPUs, BitMask on Socket 0", description: "Request 8 CPUs, BitMask on Socket 0",
topo: topoDualSocketHT, topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
numRequested: 8, numRequested: 8,
socketMask: func() bitmask.BitMask { socketMask: func() bitmask.BitMask {
mask, _ := bitmask.NewBitMask(0) mask, _ := bitmask.NewBitMask(0)
return mask return mask
}(), }(),
expCSet: cpuset.NewCPUSet(0, 6, 2, 8, 4, 10, 1, 7), expCSet: cpuset.New(0, 6, 2, 8, 4, 10, 1, 7),
}, },
{ {
description: "Request 8 CPUs, BitMask on Socket 1", description: "Request 8 CPUs, BitMask on Socket 1",
topo: topoDualSocketHT, topo: topoDualSocketHT,
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
numRequested: 8, numRequested: 8,
socketMask: func() bitmask.BitMask { socketMask: func() bitmask.BitMask {
mask, _ := bitmask.NewBitMask(1) mask, _ := bitmask.NewBitMask(1)
return mask return mask
}(), }(),
expCSet: cpuset.NewCPUSet(1, 7, 3, 9, 5, 11, 0, 6), expCSet: cpuset.New(1, 7, 3, 9, 5, 11, 0, 6),
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
p, _ := NewStaticPolicy(tc.topo, 0, cpuset.NewCPUSet(), topologymanager.NewFakeManager(), nil) p, _ := NewStaticPolicy(tc.topo, 0, cpuset.New(), topologymanager.NewFakeManager(), nil)
policy := p.(*staticPolicy) policy := p.(*staticPolicy)
st := &mockState{ st := &mockState{
assignments: tc.stAssignments, assignments: tc.stAssignments,
@@ -828,7 +828,7 @@ func TestTopologyAwareAllocateCPUs(t *testing.T) {
continue continue
} }
cset, err := policy.allocateCPUs(st, tc.numRequested, tc.socketMask, cpuset.NewCPUSet()) cset, err := policy.allocateCPUs(st, tc.numRequested, tc.socketMask, cpuset.New())
if err != nil { if err != nil {
t.Errorf("StaticPolicy allocateCPUs() error (%v). expected CPUSet %v not error %v", t.Errorf("StaticPolicy allocateCPUs() error (%v). expected CPUSet %v not error %v",
tc.description, tc.expCSet, err) tc.description, tc.expCSet, err)
@@ -864,27 +864,27 @@ func TestStaticPolicyStartWithResvList(t *testing.T) {
description: "empty cpuset", description: "empty cpuset",
topo: topoDualSocketHT, topo: topoDualSocketHT,
numReservedCPUs: 2, numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1), reserved: cpuset.New(0, 1),
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(), stDefaultCPUSet: cpuset.New(),
expCSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), expCSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
}, },
{ {
description: "reserved cores 0 & 1 are not present in available cpuset", description: "reserved cores 0 & 1 are not present in available cpuset",
topo: topoDualSocketHT, topo: topoDualSocketHT,
numReservedCPUs: 2, numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1), reserved: cpuset.New(0, 1),
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5), stDefaultCPUSet: cpuset.New(2, 3, 4, 5),
expErr: fmt.Errorf("not all reserved cpus: \"0-1\" are present in defaultCpuSet: \"2-5\""), expErr: fmt.Errorf("not all reserved cpus: \"0-1\" are present in defaultCpuSet: \"2-5\""),
}, },
{ {
description: "inconsistency between numReservedCPUs and reserved", description: "inconsistency between numReservedCPUs and reserved",
topo: topoDualSocketHT, topo: topoDualSocketHT,
numReservedCPUs: 1, numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0, 1), reserved: cpuset.New(0, 1),
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"), expNewErr: fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of 0-1 did not equal 1)"),
}, },
} }
@@ -928,41 +928,41 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
description: "GuPodSingleCore, SingleSocketHT, ExpectError", description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 1, numReservedCPUs: 1,
reserved: cpuset.NewCPUSet(0), reserved: cpuset.New(0),
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"), pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"), expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false, expCPUAlloc: false,
expCSet: cpuset.NewCPUSet(), expCSet: cpuset.New(),
}, },
{ {
description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU", description: "GuPodSingleCore, SingleSocketHT, ExpectAllocOneCPU",
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 2, numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1), reserved: cpuset.New(0, 1),
stAssignments: state.ContainerCPUAssignments{}, stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"), pod: makePod("fakePod", "fakeContainer2", "1000m", "1000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4), // expect sibling of partial core expCSet: cpuset.New(4), // expect sibling of partial core
}, },
{ {
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore", description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT, topo: topoSingleSocketHT,
numReservedCPUs: 2, numReservedCPUs: 2,
reserved: cpuset.NewCPUSet(0, 1), reserved: cpuset.New(0, 1),
stAssignments: state.ContainerCPUAssignments{ stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{ "fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.NewCPUSet(2, 3, 6, 7), "fakeContainer100": cpuset.New(2, 3, 6, 7),
}, },
}, },
stDefaultCPUSet: cpuset.NewCPUSet(0, 1, 4, 5), stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"), pod: makePod("fakePod", "fakeContainer3", "2000m", "2000m"),
expErr: nil, expErr: nil,
expCPUAlloc: true, expCPUAlloc: true,
expCSet: cpuset.NewCPUSet(4, 5), expCSet: cpuset.New(4, 5),
}, },
} }
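The reserved-list cases above construct the policy the same way as the earlier tests, only with a non-empty reserved set. A minimal sketch of that call shape, mirroring the arguments as they appear in these tests (error handling shown only for illustration):

// Sketch: build a static policy whose reserved CPUs (here 0 and 1) are
// carved out with the renamed constructor. numReservedCPUs must match the
// size of the reserved set, as the "inconsistency" case above verifies.
reserved := cpuset.New(0, 1)
policy, err := NewStaticPolicy(topoDualSocketHT, reserved.Size(), reserved, topologymanager.NewFakeManager(), nil)
if err != nil {
	// e.g. "unable to reserve the required amount of CPUs ..."
}
_ = policy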

View File

@@ -60,7 +60,7 @@ func TestCheckpointStateRestore(t *testing.T) {
containermap.ContainerMap{}, containermap.ContainerMap{},
"", "",
&stateMemory{ &stateMemory{
defaultCPUSet: cpuset.NewCPUSet(4, 5, 6), defaultCPUSet: cpuset.New(4, 5, 6),
}, },
}, },
{ {
@@ -82,11 +82,11 @@ func TestCheckpointStateRestore(t *testing.T) {
&stateMemory{ &stateMemory{
assignments: ContainerCPUAssignments{ assignments: ContainerCPUAssignments{
"pod": map[string]cpuset.CPUSet{ "pod": map[string]cpuset.CPUSet{
"container1": cpuset.NewCPUSet(4, 5, 6), "container1": cpuset.New(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3), "container2": cpuset.New(1, 2, 3),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3), defaultCPUSet: cpuset.New(1, 2, 3),
}, },
}, },
{ {
@@ -165,7 +165,7 @@ func TestCheckpointStateRestore(t *testing.T) {
containermap.ContainerMap{}, containermap.ContainerMap{},
"", "",
&stateMemory{ &stateMemory{
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3), defaultCPUSet: cpuset.New(1, 2, 3),
}, },
}, },
{ {
@@ -190,11 +190,11 @@ func TestCheckpointStateRestore(t *testing.T) {
&stateMemory{ &stateMemory{
assignments: ContainerCPUAssignments{ assignments: ContainerCPUAssignments{
"pod": map[string]cpuset.CPUSet{ "pod": map[string]cpuset.CPUSet{
"container1": cpuset.NewCPUSet(4, 5, 6), "container1": cpuset.New(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3), "container2": cpuset.New(1, 2, 3),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(1, 2, 3), defaultCPUSet: cpuset.New(1, 2, 3),
}, },
}, },
} }
@@ -242,14 +242,14 @@ func TestCheckpointStateStore(t *testing.T) {
}{ }{
{ {
"Store default cpu set", "Store default cpu set",
&stateMemory{defaultCPUSet: cpuset.NewCPUSet(1, 2, 3)}, &stateMemory{defaultCPUSet: cpuset.New(1, 2, 3)},
}, },
{ {
"Store assignments", "Store assignments",
&stateMemory{ &stateMemory{
assignments: map[string]map[string]cpuset.CPUSet{ assignments: map[string]map[string]cpuset.CPUSet{
"pod": { "pod": {
"container1": cpuset.NewCPUSet(1, 5, 8), "container1": cpuset.New(1, 5, 8),
}, },
}, },
}, },
@@ -301,29 +301,29 @@ func TestCheckpointStateHelpers(t *testing.T) {
}{ }{
{ {
description: "One container", description: "One container",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8), defaultCPUset: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8),
assignments: map[string]map[string]cpuset.CPUSet{ assignments: map[string]map[string]cpuset.CPUSet{
"pod": { "pod": {
"c1": cpuset.NewCPUSet(0, 1), "c1": cpuset.New(0, 1),
}, },
}, },
}, },
{ {
description: "Two containers", description: "Two containers",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8), defaultCPUset: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8),
assignments: map[string]map[string]cpuset.CPUSet{ assignments: map[string]map[string]cpuset.CPUSet{
"pod": { "pod": {
"c1": cpuset.NewCPUSet(0, 1), "c1": cpuset.New(0, 1),
"c2": cpuset.NewCPUSet(2, 3, 4, 5), "c2": cpuset.New(2, 3, 4, 5),
}, },
}, },
}, },
{ {
description: "Container without assigned cpus", description: "Container without assigned cpus",
defaultCPUset: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8), defaultCPUset: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8),
assignments: map[string]map[string]cpuset.CPUSet{ assignments: map[string]map[string]cpuset.CPUSet{
"pod": { "pod": {
"c1": cpuset.NewCPUSet(), "c1": cpuset.New(),
}, },
}, },
}, },
@@ -377,10 +377,10 @@ func TestCheckpointStateClear(t *testing.T) {
}{ }{
{ {
"Valid state", "Valid state",
cpuset.NewCPUSet(1, 5, 10), cpuset.New(1, 5, 10),
map[string]map[string]cpuset.CPUSet{ map[string]map[string]cpuset.CPUSet{
"pod": { "pod": {
"container1": cpuset.NewCPUSet(1, 4), "container1": cpuset.New(1, 4),
}, },
}, },
}, },
@@ -404,7 +404,7 @@ func TestCheckpointStateClear(t *testing.T) {
state.SetCPUAssignments(tc.assignments) state.SetCPUAssignments(tc.assignments)
state.ClearState() state.ClearState()
if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) { if !cpuset.New().Equals(state.GetDefaultCPUSet()) {
t.Fatal("cleared state with non-empty default cpu set") t.Fatal("cleared state with non-empty default cpu set")
} }
for pod := range tc.assignments { for pod := range tc.assignments {

View File

@@ -36,7 +36,7 @@ func NewMemoryState() State {
klog.InfoS("Initialized new in-memory state store") klog.InfoS("Initialized new in-memory state store")
return &stateMemory{ return &stateMemory{
assignments: ContainerCPUAssignments{}, assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
} }
} }

View File

@@ -26,8 +26,8 @@ import (
func TestClone(t *testing.T) { func TestClone(t *testing.T) {
expect := ContainerCPUAssignments{ expect := ContainerCPUAssignments{
"pod": map[string]cpuset.CPUSet{ "pod": map[string]cpuset.CPUSet{
"container1": cpuset.NewCPUSet(4, 5, 6), "container1": cpuset.New(4, 5, 6),
"container2": cpuset.NewCPUSet(1, 2, 3), "container2": cpuset.New(1, 2, 3),
}, },
} }
actual := expect.Clone() actual := expect.Clone()

View File

@@ -261,7 +261,7 @@ func getUniqueCoreID(threads []int) (coreID int, err error) {
return 0, fmt.Errorf("no cpus provided") return 0, fmt.Errorf("no cpus provided")
} }
if len(threads) != cpuset.NewCPUSet(threads...).Size() { if len(threads) != cpuset.New(threads...).Size() {
return 0, fmt.Errorf("cpus provided are not unique") return 0, fmt.Errorf("cpus provided are not unique")
} }
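The uniqueness check above works because the constructor collapses repeated IDs into a set; a small standalone illustration of the same idiom (values chosen only for the example):

// If the input contains duplicates, the resulting set is smaller than the slice.
threads := []int{0, 4, 0}
if len(threads) != cpuset.New(threads...).Size() {
	// len 3 != size 2: duplicate CPU IDs were passed in
}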

View File

@@ -537,14 +537,14 @@ func TestCPUDetailsKeepOnly(t *testing.T) {
want CPUDetails want CPUDetails
}{{ }{{
name: "cpus is in CPUDetails.", name: "cpus is in CPUDetails.",
cpus: cpuset.NewCPUSet(0, 1), cpus: cpuset.New(0, 1),
want: map[int]CPUInfo{ want: map[int]CPUInfo{
0: {}, 0: {},
1: {}, 1: {},
}, },
}, { }, {
name: "cpus is not in CPUDetails.", name: "cpus is not in CPUDetails.",
cpus: cpuset.NewCPUSet(3), cpus: cpuset.New(3),
want: CPUDetails{}, want: CPUDetails{},
}} }}
@@ -572,7 +572,7 @@ func TestCPUDetailsNUMANodes(t *testing.T) {
2: {NUMANodeID: 1}, 2: {NUMANodeID: 1},
3: {NUMANodeID: 1}, 3: {NUMANodeID: 1},
}, },
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -613,22 +613,22 @@ func TestCPUDetailsNUMANodesInSockets(t *testing.T) {
name: "Socket IDs is in CPUDetails.", name: "Socket IDs is in CPUDetails.",
details: details1, details: details1,
ids: []int{0, 1, 2}, ids: []int{0, 1, 2},
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}, { }, {
name: "Socket IDs is not in CPUDetails.", name: "Socket IDs is not in CPUDetails.",
details: details1, details: details1,
ids: []int{4}, ids: []int{4},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}, { }, {
name: "Socket IDs is in CPUDetails. (poorly designed mainboards)", name: "Socket IDs is in CPUDetails. (poorly designed mainboards)",
details: details2, details: details2,
ids: []int{0}, ids: []int{0},
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}, { }, {
name: "Socket IDs is not in CPUDetails. (poorly designed mainboards)", name: "Socket IDs is not in CPUDetails. (poorly designed mainboards)",
details: details2, details: details2,
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -655,7 +655,7 @@ func TestCPUDetailsSockets(t *testing.T) {
2: {SocketID: 1}, 2: {SocketID: 1},
3: {SocketID: 1}, 3: {SocketID: 1},
}, },
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -685,11 +685,11 @@ func TestCPUDetailsCPUsInSockets(t *testing.T) {
}{{ }{{
name: "Socket IDs is in CPUDetails.", name: "Socket IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "Socket IDs is not in CPUDetails.", name: "Socket IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -719,11 +719,11 @@ func TestCPUDetailsSocketsInNUMANodes(t *testing.T) {
}{{ }{{
name: "NUMANodes IDs is in CPUDetails.", name: "NUMANodes IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "NUMANodes IDs is not in CPUDetails.", name: "NUMANodes IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -750,7 +750,7 @@ func TestCPUDetailsCores(t *testing.T) {
2: {CoreID: 1}, 2: {CoreID: 1},
3: {CoreID: 1}, 3: {CoreID: 1},
}, },
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -780,11 +780,11 @@ func TestCPUDetailsCoresInNUMANodes(t *testing.T) {
}{{ }{{
name: "NUMANodes IDs is in CPUDetails.", name: "NUMANodes IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "NUMANodes IDs is not in CPUDetails.", name: "NUMANodes IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -814,11 +814,11 @@ func TestCPUDetailsCoresInSockets(t *testing.T) {
}{{ }{{
name: "Socket IDs is in CPUDetails.", name: "Socket IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "Socket IDs is not in CPUDetails.", name: "Socket IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -843,7 +843,7 @@ func TestCPUDetailsCPUs(t *testing.T) {
0: {}, 0: {},
1: {}, 1: {},
}, },
want: cpuset.NewCPUSet(0, 1), want: cpuset.New(0, 1),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -873,11 +873,11 @@ func TestCPUDetailsCPUsInNUMANodes(t *testing.T) {
}{{ }{{
name: "NUMANode IDs is in CPUDetails.", name: "NUMANode IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "NUMANode IDs is not in CPUDetails.", name: "NUMANode IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {
@@ -907,11 +907,11 @@ func TestCPUDetailsCPUsInCores(t *testing.T) {
}{{ }{{
name: "Core IDs is in CPUDetails.", name: "Core IDs is in CPUDetails.",
ids: []int{0, 1}, ids: []int{0, 1},
want: cpuset.NewCPUSet(0, 1, 2), want: cpuset.New(0, 1, 2),
}, { }, {
name: "Core IDs is not in CPUDetails.", name: "Core IDs is not in CPUDetails.",
ids: []int{3}, ids: []int{3},
want: cpuset.NewCPUSet(), want: cpuset.New(),
}} }}
for _, tt := range tests { for _, tt := range tests {

View File

@@ -290,7 +290,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
description: "AlignBySocket:false, Preferred hints does not contains socket aligned hints", description: "AlignBySocket:false, Preferred hints does not contains socket aligned hints",
pod: *testPod1, pod: *testPod1,
container: *testContainer1, container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 11), defaultCPUSet: cpuset.New(2, 3, 11),
topology: topoDualSocketMultiNumaPerSocketHT, topology: topoDualSocketMultiNumaPerSocketHT,
policyOptions: map[string]string{AlignBySocketOption: "false"}, policyOptions: map[string]string{AlignBySocketOption: "false"},
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
@@ -333,7 +333,7 @@ func TestGetPodTopologyHintsWithPolicyOptions(t *testing.T) {
description: "AlignBySocket:true Preferred hints contains socket aligned hints", description: "AlignBySocket:true Preferred hints contains socket aligned hints",
pod: *testPod1, pod: *testPod1,
container: *testContainer1, container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 11), defaultCPUSet: cpuset.New(2, 3, 11),
topology: topoDualSocketMultiNumaPerSocketHT, topology: topoDualSocketMultiNumaPerSocketHT,
policyOptions: map[string]string{AlignBySocketOption: "true"}, policyOptions: map[string]string{AlignBySocketOption: "true"},
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
@@ -456,7 +456,7 @@ func returnTestCases() []testCase {
name: "Request 2 CPUs, 4 available on NUMA 0, 6 available on NUMA 1", name: "Request 2 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod1, pod: *testPod1,
container: *testContainer1, container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11), defaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: firstSocketMask, NUMANodeAffinity: firstSocketMask,
@@ -476,7 +476,7 @@ func returnTestCases() []testCase {
name: "Request 5 CPUs, 4 available on NUMA 0, 6 available on NUMA 1", name: "Request 5 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod2, pod: *testPod2,
container: *testContainer2, container: *testContainer2,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11), defaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: secondSocketMask, NUMANodeAffinity: secondSocketMask,
@@ -492,7 +492,7 @@ func returnTestCases() []testCase {
name: "Request 7 CPUs, 4 available on NUMA 0, 6 available on NUMA 1", name: "Request 7 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod3, pod: *testPod3,
container: *testContainer3, container: *testContainer3,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11), defaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: crossSocketMask, NUMANodeAffinity: crossSocketMask,
@@ -504,14 +504,14 @@ func returnTestCases() []testCase {
name: "Request 11 CPUs, 4 available on NUMA 0, 6 available on NUMA 1", name: "Request 11 CPUs, 4 available on NUMA 0, 6 available on NUMA 1",
pod: *testPod4, pod: *testPod4,
container: *testContainer4, container: *testContainer4,
defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11), defaultCPUSet: cpuset.New(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
expectedHints: nil, expectedHints: nil,
}, },
{ {
name: "Request 2 CPUs, 1 available on NUMA 0, 1 available on NUMA 1", name: "Request 2 CPUs, 1 available on NUMA 0, 1 available on NUMA 1",
pod: *testPod1, pod: *testPod1,
container: *testContainer1, container: *testContainer1,
defaultCPUSet: cpuset.NewCPUSet(0, 3), defaultCPUSet: cpuset.New(0, 3),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: crossSocketMask, NUMANodeAffinity: crossSocketMask,
@@ -523,7 +523,7 @@ func returnTestCases() []testCase {
name: "Request more CPUs than available", name: "Request more CPUs than available",
pod: *testPod2, pod: *testPod2,
container: *testContainer2, container: *testContainer2,
defaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3), defaultCPUSet: cpuset.New(0, 1, 2, 3),
expectedHints: nil, expectedHints: nil,
}, },
{ {
@@ -532,10 +532,10 @@ func returnTestCases() []testCase {
container: *testContainer1, container: *testContainer1,
assignments: state.ContainerCPUAssignments{ assignments: state.ContainerCPUAssignments{
string(testPod1.UID): map[string]cpuset.CPUSet{ string(testPod1.UID): map[string]cpuset.CPUSet{
testContainer1.Name: cpuset.NewCPUSet(0, 6), testContainer1.Name: cpuset.New(0, 6),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: firstSocketMask, NUMANodeAffinity: firstSocketMask,
@@ -553,10 +553,10 @@ func returnTestCases() []testCase {
container: *testContainer1, container: *testContainer1,
assignments: state.ContainerCPUAssignments{ assignments: state.ContainerCPUAssignments{
string(testPod1.UID): map[string]cpuset.CPUSet{ string(testPod1.UID): map[string]cpuset.CPUSet{
testContainer1.Name: cpuset.NewCPUSet(3, 9), testContainer1.Name: cpuset.New(3, 9),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: secondSocketMask, NUMANodeAffinity: secondSocketMask,
@@ -574,10 +574,10 @@ func returnTestCases() []testCase {
container: *testContainer4, container: *testContainer4,
assignments: state.ContainerCPUAssignments{ assignments: state.ContainerCPUAssignments{
string(testPod4.UID): map[string]cpuset.CPUSet{ string(testPod4.UID): map[string]cpuset.CPUSet{
testContainer4.Name: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), testContainer4.Name: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
expectedHints: []topologymanager.TopologyHint{ expectedHints: []topologymanager.TopologyHint{
{ {
NUMANodeAffinity: crossSocketMask, NUMANodeAffinity: crossSocketMask,
@@ -591,10 +591,10 @@ func returnTestCases() []testCase {
container: *testContainer1, container: *testContainer1,
assignments: state.ContainerCPUAssignments{ assignments: state.ContainerCPUAssignments{
string(testPod1.UID): map[string]cpuset.CPUSet{ string(testPod1.UID): map[string]cpuset.CPUSet{
testContainer1.Name: cpuset.NewCPUSet(0, 6, 3, 9), testContainer1.Name: cpuset.New(0, 6, 3, 9),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
expectedHints: []topologymanager.TopologyHint{}, expectedHints: []topologymanager.TopologyHint{},
}, },
{ {
@@ -603,10 +603,10 @@ func returnTestCases() []testCase {
container: *testContainer4, container: *testContainer4,
assignments: state.ContainerCPUAssignments{ assignments: state.ContainerCPUAssignments{
string(testPod4.UID): map[string]cpuset.CPUSet{ string(testPod4.UID): map[string]cpuset.CPUSet{
testContainer4.Name: cpuset.NewCPUSet(0, 6, 3, 9), testContainer4.Name: cpuset.New(0, 6, 3, 9),
}, },
}, },
defaultCPUSet: cpuset.NewCPUSet(), defaultCPUSet: cpuset.New(),
expectedHints: []topologymanager.TopologyHint{}, expectedHints: []topologymanager.TopologyHint{},
}, },
} }

View File

@@ -64,8 +64,8 @@ type CPUSet struct {
elems map[int]struct{} elems map[int]struct{}
} }
// NewCPUSet returns a new CPUSet containing the supplied elements. // New returns a new CPUSet containing the supplied elements.
func NewCPUSet(cpus ...int) CPUSet { func New(cpus ...int) CPUSet {
b := NewBuilder() b := NewBuilder()
for _, c := range cpus { for _, c := range cpus {
b.Add(c) b.Add(c)
@@ -231,21 +231,21 @@ func Parse(s string) (CPUSet, error) {
// Handle ranges that consist of only one element like "34". // Handle ranges that consist of only one element like "34".
elem, err := strconv.Atoi(boundaries[0]) elem, err := strconv.Atoi(boundaries[0])
if err != nil { if err != nil {
return NewCPUSet(), err return New(), err
} }
b.Add(elem) b.Add(elem)
} else if len(boundaries) == 2 { } else if len(boundaries) == 2 {
// Handle multi-element ranges like "0-5". // Handle multi-element ranges like "0-5".
start, err := strconv.Atoi(boundaries[0]) start, err := strconv.Atoi(boundaries[0])
if err != nil { if err != nil {
return NewCPUSet(), err return New(), err
} }
end, err := strconv.Atoi(boundaries[1]) end, err := strconv.Atoi(boundaries[1])
if err != nil { if err != nil {
return NewCPUSet(), err return New(), err
} }
if start > end { if start > end {
return NewCPUSet(), fmt.Errorf("invalid range %q (%d >= %d)", r, start, end) return New(), fmt.Errorf("invalid range %q (%d >= %d)", r, start, end)
} }
// start == end is acceptable (1-1 -> 1) // start == end is acceptable (1-1 -> 1)
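Taken together, the renamed constructor and Parse round-trip between explicit CPU lists and the kernel-style range notation quoted throughout these tests (e.g. "0-1", "2-11"). A brief usage sketch, kept to methods that appear elsewhere in this commit:

// Sketch of the renamed API: build a set, compare it with one parsed from
// range notation, and combine sets the way the tests above do.
a := cpuset.New(0, 1, 6, 7)
b, err := cpuset.Parse("0-1,6-7")
if err != nil {
	// malformed ranges return an empty set and an error, as shown above
}
if a.Equals(b) {
	// both contain CPUs 0, 1, 6 and 7
}
shared := a.Intersection(cpuset.New(6, 7)) // equivalent to cpuset.New(6, 7)
_ = shared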

View File

@@ -48,9 +48,9 @@ func TestCPUSetSize(t *testing.T) {
cpuset CPUSet cpuset CPUSet
expected int expected int
}{ }{
{NewCPUSet(), 0}, {New(), 0},
{NewCPUSet(5), 1}, {New(5), 1},
{NewCPUSet(1, 2, 3, 4, 5), 5}, {New(1, 2, 3, 4, 5), 5},
} }
for _, c := range testCases { for _, c := range testCases {
@@ -66,9 +66,9 @@ func TestCPUSetIsEmpty(t *testing.T) {
cpuset CPUSet cpuset CPUSet
expected bool expected bool
}{ }{
{NewCPUSet(), true}, {New(), true},
{NewCPUSet(5), false}, {New(5), false},
{NewCPUSet(1, 2, 3, 4, 5), false}, {New(1, 2, 3, 4, 5), false},
} }
for _, c := range testCases { for _, c := range testCases {
@@ -85,9 +85,9 @@ func TestCPUSetContains(t *testing.T) {
mustContain []int mustContain []int
mustNotContain []int mustNotContain []int
}{ }{
{NewCPUSet(), []int{}, []int{1, 2, 3, 4, 5}}, {New(), []int{}, []int{1, 2, 3, 4, 5}},
{NewCPUSet(5), []int{5}, []int{1, 2, 3, 4}}, {New(5), []int{5}, []int{1, 2, 3, 4}},
{NewCPUSet(1, 2, 4, 5), []int{1, 2, 4, 5}, []int{0, 3, 6}}, {New(1, 2, 4, 5), []int{1, 2, 4, 5}, []int{0, 3, 6}},
} }
for _, c := range testCases { for _, c := range testCases {
@@ -109,21 +109,21 @@ func TestCPUSetEqual(t *testing.T) {
s1 CPUSet s1 CPUSet
s2 CPUSet s2 CPUSet
}{ }{
{NewCPUSet(), NewCPUSet()}, {New(), New()},
{NewCPUSet(5), NewCPUSet(5)}, {New(5), New(5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), New(1, 2, 3, 4, 5)},
} }
shouldNotEqual := []struct { shouldNotEqual := []struct {
s1 CPUSet s1 CPUSet
s2 CPUSet s2 CPUSet
}{ }{
{NewCPUSet(), NewCPUSet(5)}, {New(), New(5)},
{NewCPUSet(5), NewCPUSet()}, {New(5), New()},
{NewCPUSet(), NewCPUSet(1, 2, 3, 4, 5)}, {New(), New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet()}, {New(1, 2, 3, 4, 5), New()},
{NewCPUSet(5), NewCPUSet(1, 2, 3, 4, 5)}, {New(5), New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(5)}, {New(1, 2, 3, 4, 5), New(5)},
} }
for _, c := range shouldEqual { for _, c := range shouldEqual {
@@ -144,18 +144,18 @@ func TestCPUSetIsSubsetOf(t *testing.T) {
s2 CPUSet s2 CPUSet
}{ }{
// A set is a subset of itself // A set is a subset of itself
{NewCPUSet(), NewCPUSet()}, {New(), New()},
{NewCPUSet(5), NewCPUSet(5)}, {New(5), New(5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), New(1, 2, 3, 4, 5)},
// Empty set is a subset of every set // Empty set is a subset of every set
{NewCPUSet(), NewCPUSet(5)}, {New(), New(5)},
{NewCPUSet(), NewCPUSet(1, 2, 3, 4, 5)}, {New(), New(1, 2, 3, 4, 5)},
{NewCPUSet(5), NewCPUSet(1, 2, 3, 4, 5)}, {New(5), New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3), NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3), New(1, 2, 3, 4, 5)},
{NewCPUSet(4, 5), NewCPUSet(1, 2, 3, 4, 5)}, {New(4, 5), New(1, 2, 3, 4, 5)},
{NewCPUSet(2, 3), NewCPUSet(1, 2, 3, 4, 5)}, {New(2, 3), New(1, 2, 3, 4, 5)},
} }
shouldNotBeSubset := []struct { shouldNotBeSubset := []struct {
@@ -181,27 +181,27 @@ func TestCPUSetUnion(t *testing.T) {
others []CPUSet others []CPUSet
expected CPUSet expected CPUSet
}{ }{
{NewCPUSet(5), []CPUSet{}, NewCPUSet(5)}, {New(5), []CPUSet{}, New(5)},
{NewCPUSet(), []CPUSet{NewCPUSet()}, NewCPUSet()}, {New(), []CPUSet{New()}, New()},
{NewCPUSet(), []CPUSet{NewCPUSet(5)}, NewCPUSet(5)}, {New(), []CPUSet{New(5)}, New(5)},
{NewCPUSet(5), []CPUSet{NewCPUSet()}, NewCPUSet(5)}, {New(5), []CPUSet{New()}, New(5)},
{NewCPUSet(5), []CPUSet{NewCPUSet(5)}, NewCPUSet(5)}, {New(5), []CPUSet{New(5)}, New(5)},
{NewCPUSet(), []CPUSet{NewCPUSet(1, 2, 3, 4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(), []CPUSet{New(1, 2, 3, 4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), []CPUSet{NewCPUSet()}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), []CPUSet{New()}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), []CPUSet{NewCPUSet(1, 2, 3, 4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), []CPUSet{New(1, 2, 3, 4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(5), []CPUSet{NewCPUSet(1, 2, 3, 4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(5), []CPUSet{New(1, 2, 3, 4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), []CPUSet{NewCPUSet(5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), []CPUSet{New(5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2), []CPUSet{NewCPUSet(3, 4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2), []CPUSet{New(3, 4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3), []CPUSet{NewCPUSet(3, 4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3), []CPUSet{New(3, 4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(), []CPUSet{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(4, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(), []CPUSet{New(1, 2, 3, 4, 5), New(4, 5)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), []CPUSet{NewCPUSet(), NewCPUSet(4)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), []CPUSet{New(), New(4)}, New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), []CPUSet{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 5)}, NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), []CPUSet{New(1, 2, 3, 4, 5), New(1, 5)}, New(1, 2, 3, 4, 5)},
} }
for _, c := range testCases { for _, c := range testCases {
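The `others []CPUSet` field above implies the variadic form of Union. A minimal sketch under that assumption (and the same import path as before):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	a := cpuset.New(1, 2)
	b := cpuset.New(3, 4, 5)
	c := cpuset.New(4, 5)

	// Union accepts zero or more other sets, as the 'others' field suggests.
	fmt.Println(a.Union(b, c)) // 1-5
	fmt.Println(a.Union())     // 1-2: union with nothing is just the receiver
}
```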
@@ -218,21 +218,21 @@ func TestCPUSetIntersection(t *testing.T) {
s2 CPUSet s2 CPUSet
expected CPUSet expected CPUSet
}{ }{
{NewCPUSet(), NewCPUSet(), NewCPUSet()}, {New(), New(), New()},
{NewCPUSet(), NewCPUSet(5), NewCPUSet()}, {New(), New(5), New()},
{NewCPUSet(5), NewCPUSet(), NewCPUSet()}, {New(5), New(), New()},
{NewCPUSet(5), NewCPUSet(5), NewCPUSet(5)}, {New(5), New(5), New(5)},
{NewCPUSet(), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet()}, {New(), New(1, 2, 3, 4, 5), New()},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(), NewCPUSet()}, {New(1, 2, 3, 4, 5), New(), New()},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), New(1, 2, 3, 4, 5), New(1, 2, 3, 4, 5)},
{NewCPUSet(5), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(5)}, {New(5), New(1, 2, 3, 4, 5), New(5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(5), NewCPUSet(5)}, {New(1, 2, 3, 4, 5), New(5), New(5)},
{NewCPUSet(1, 2), NewCPUSet(3, 4, 5), NewCPUSet()}, {New(1, 2), New(3, 4, 5), New()},
{NewCPUSet(1, 2, 3), NewCPUSet(3, 4, 5), NewCPUSet(3)}, {New(1, 2, 3), New(3, 4, 5), New(3)},
} }
for _, c := range testCases { for _, c := range testCases {
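For reference, the intersection behavior these cases check, as a small sketch (same import-path assumption):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	a := cpuset.New(1, 2, 3)
	b := cpuset.New(3, 4, 5)

	fmt.Println(a.Intersection(b))                                  // 3
	fmt.Println(a.Intersection(cpuset.New()))                       // "" (empty set)
	fmt.Println(cpuset.New(1, 2).Intersection(cpuset.New(3, 4, 5))) // "" (disjoint sets)
}
```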
@@ -249,21 +249,21 @@ func TestCPUSetDifference(t *testing.T) {
s2 CPUSet s2 CPUSet
expected CPUSet expected CPUSet
}{ }{
{NewCPUSet(), NewCPUSet(), NewCPUSet()}, {New(), New(), New()},
{NewCPUSet(), NewCPUSet(5), NewCPUSet()}, {New(), New(5), New()},
{NewCPUSet(5), NewCPUSet(), NewCPUSet(5)}, {New(5), New(), New(5)},
{NewCPUSet(5), NewCPUSet(5), NewCPUSet()}, {New(5), New(5), New()},
{NewCPUSet(), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet()}, {New(), New(1, 2, 3, 4, 5), New()},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(), NewCPUSet(1, 2, 3, 4, 5)}, {New(1, 2, 3, 4, 5), New(), New(1, 2, 3, 4, 5)},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet()}, {New(1, 2, 3, 4, 5), New(1, 2, 3, 4, 5), New()},
{NewCPUSet(5), NewCPUSet(1, 2, 3, 4, 5), NewCPUSet()}, {New(5), New(1, 2, 3, 4, 5), New()},
{NewCPUSet(1, 2, 3, 4, 5), NewCPUSet(5), NewCPUSet(1, 2, 3, 4)}, {New(1, 2, 3, 4, 5), New(5), New(1, 2, 3, 4)},
{NewCPUSet(1, 2), NewCPUSet(3, 4, 5), NewCPUSet(1, 2)}, {New(1, 2), New(3, 4, 5), New(1, 2)},
{NewCPUSet(1, 2, 3), NewCPUSet(3, 4, 5), NewCPUSet(1, 2)}, {New(1, 2, 3), New(3, 4, 5), New(1, 2)},
} }
for _, c := range testCases { for _, c := range testCases {
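Difference is the operation the CPU manager leans on when carving reserved CPUs out of a pool; a minimal sketch of the behavior tested above (same import-path assumption):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	all := cpuset.New(1, 2, 3, 4, 5)
	reserved := cpuset.New(5)

	// Difference returns the elements of the receiver that are not in the argument.
	fmt.Println(all.Difference(reserved))                            // 1-4
	fmt.Println(reserved.Difference(all))                            // "" (empty set)
	fmt.Println(cpuset.New(1, 2, 3).Difference(cpuset.New(3, 4, 5))) // 1-2
}
```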
@@ -279,9 +279,9 @@ func TestCPUSetList(t *testing.T) {
set CPUSet set CPUSet
expected []int expected []int
}{ }{
{NewCPUSet(), []int{}}, {New(), []int{}},
{NewCPUSet(5), []int{5}}, {New(5), []int{5}},
{NewCPUSet(1, 2, 3, 4, 5), []int{1, 2, 3, 4, 5}}, {New(1, 2, 3, 4, 5), []int{1, 2, 3, 4, 5}},
} }
for _, c := range testCases { for _, c := range testCases {
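A short sketch of the accessor this test exercises, assuming List returns the elements as a sorted slice (and the same import path as before):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	s := cpuset.New(5, 3, 1, 2, 4)

	// List returns the elements as a sorted []int.
	fmt.Println(s.List())            // [1 2 3 4 5]
	fmt.Println(cpuset.New().List()) // []
}
```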
@@ -297,10 +297,10 @@ func TestCPUSetString(t *testing.T) {
set CPUSet set CPUSet
expected string expected string
}{ }{
{NewCPUSet(), ""}, {New(), ""},
{NewCPUSet(5), "5"}, {New(5), "5"},
{NewCPUSet(1, 2, 3, 4, 5), "1-5"}, {New(1, 2, 3, 4, 5), "1-5"},
{NewCPUSet(1, 2, 3, 5, 6, 8), "1-3,5-6,8"}, {New(1, 2, 3, 5, 6, 8), "1-3,5-6,8"},
} }
for _, c := range testCases { for _, c := range testCases {
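The expected strings above use the Linux cpuset list format; a minimal sketch of how String renders it (same import-path assumption):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Contiguous runs collapse into dash ranges, and runs are joined with
	// commas, matching the expected values in the test table.
	fmt.Println(cpuset.New(1, 2, 3, 4, 5).String())    // "1-5"
	fmt.Println(cpuset.New(1, 2, 3, 5, 6, 8).String()) // "1-3,5-6,8"
	fmt.Println(cpuset.New().String())                 // ""
}
```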
@@ -316,14 +316,14 @@ func TestParse(t *testing.T) {
cpusetString string cpusetString string
expected CPUSet expected CPUSet
}{ }{
{"", NewCPUSet()}, {"", New()},
{"5", NewCPUSet(5)}, {"5", New(5)},
{"1,2,3,4,5", NewCPUSet(1, 2, 3, 4, 5)}, {"1,2,3,4,5", New(1, 2, 3, 4, 5)},
{"1-5", NewCPUSet(1, 2, 3, 4, 5)}, {"1-5", New(1, 2, 3, 4, 5)},
{"1-2,3-5", NewCPUSet(1, 2, 3, 4, 5)}, {"1-2,3-5", New(1, 2, 3, 4, 5)},
{"5,4,3,2,1", NewCPUSet(1, 2, 3, 4, 5)}, // Range ordering {"5,4,3,2,1", New(1, 2, 3, 4, 5)}, // Range ordering
{"3-6,1-5", NewCPUSet(1, 2, 3, 4, 5, 6)}, // Overlapping ranges {"3-6,1-5", New(1, 2, 3, 4, 5, 6)}, // Overlapping ranges
{"3-3,5-5", NewCPUSet(3, 5)}, // Very short ranges {"3-3,5-5", New(3, 5)}, // Very short ranges
} }
for _, c := range positiveTestCases { for _, c := range positiveTestCases {
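Parse is the inverse of String and accepts the same list format, including unordered and overlapping ranges as the comments above note. A small sketch (same import-path assumption):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Parse accepts single CPUs, comma-separated values, and dash ranges,
	// in any order; overlapping ranges are merged.
	s, err := cpuset.Parse("3-6,1-5")
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Equals(cpuset.New(1, 2, 3, 4, 5, 6))) // true
}
```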

@@ -79,7 +79,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() {
newCfg := configureCPUManagerInKubelet(oldCfg, newCfg := configureCPUManagerInKubelet(oldCfg,
&cpuManagerKubeletArguments{ &cpuManagerKubeletArguments{
policyName: string(cpumanager.PolicyStatic), policyName: string(cpumanager.PolicyStatic),
reservedSystemCPUs: cpuset.NewCPUSet(0), reservedSystemCPUs: cpuset.New(0),
enableCPUManagerOptions: true, enableCPUManagerOptions: true,
options: cpuPolicyOptions, options: cpuPolicyOptions,
}, },
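The static-policy configuration above only needs a single reserved CPU. As a rough sketch of the same split between reserved and pod-available CPUs, where reservedAndShared is an illustrative helper (not part of the e2e suite) and the import path is the in-tree cpuset package assumed throughout:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

// reservedAndShared is an illustrative helper, not part of the e2e suite:
// it splits the machine's CPUs into the set reserved for system use and
// the remainder left for pod workloads.
func reservedAndShared(allCPUs, reserved cpuset.CPUSet) (cpuset.CPUSet, cpuset.CPUSet) {
	return reserved, allCPUs.Difference(reserved)
}

func main() {
	all := cpuset.New(0, 1, 2, 3)
	reserved, shared := reservedAndShared(all, cpuset.New(0)) // mirrors reservedSystemCPUs: cpuset.New(0)

	fmt.Println(reserved) // 0
	fmt.Println(shared)   // 1-3
}
```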

@@ -334,7 +334,7 @@ func runMultipleGuNonGuPods(ctx context.Context, f *framework.Framework, cpuCap
cpuListString = "0" cpuListString = "0"
if cpuAlloc > 2 { if cpuAlloc > 2 {
cset = mustParseCPUSet(fmt.Sprintf("0-%d", cpuCap-1)) cset = mustParseCPUSet(fmt.Sprintf("0-%d", cpuCap-1))
cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1))) cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.New(cpu1)))
} }
expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString) expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
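To give a concrete feel for the expected output built above, here is a small sketch with cpuCap and cpu1 as hypothetical stand-ins for the values the test computes at runtime; the real test uses its own mustParseCPUSet helper, while cpuset.Parse with explicit error handling is used here instead:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Hypothetical values standing in for the test's cpuCap and cpu1.
	cpuCap := 8
	cpu1 := 5

	cset, err := cpuset.Parse(fmt.Sprintf("0-%d", cpuCap-1))
	if err != nil {
		panic(err)
	}

	// The non-guaranteed pod is expected to run on everything except the
	// exclusively assigned CPU, rendered in cpuset list format.
	cpuListString := fmt.Sprintf("%s", cset.Difference(cpuset.New(cpu1)))
	expAllowedCPUsListRegex := fmt.Sprintf("^%s\n$", cpuListString)

	fmt.Printf("%q\n", expAllowedCPUsListRegex) // "^0-4,6-7\n$"
}
```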
@@ -633,7 +633,7 @@ func runCPUManagerTests(f *framework.Framework) {
newCfg := configureCPUManagerInKubelet(oldCfg, newCfg := configureCPUManagerInKubelet(oldCfg,
&cpuManagerKubeletArguments{ &cpuManagerKubeletArguments{
policyName: string(cpumanager.PolicyStatic), policyName: string(cpumanager.PolicyStatic),
reservedSystemCPUs: cpuset.NewCPUSet(0), reservedSystemCPUs: cpuset.New(0),
enableCPUManagerOptions: true, enableCPUManagerOptions: true,
options: cpuPolicyOptions, options: cpuPolicyOptions,
}, },

@@ -561,7 +561,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
f := framework.NewDefaultFramework("podresources-test") f := framework.NewDefaultFramework("podresources-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
reservedSystemCPUs := cpuset.NewCPUSet(1) reservedSystemCPUs := cpuset.New(1)
ginkgo.Context("with SRIOV devices in the system", func() { ginkgo.Context("with SRIOV devices in the system", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {