use table test pattern and replace hardcoded values

Author: reinka
Date: 2024-02-13 20:05:26 +01:00
Committed by: Andrei Poehlmann
Parent: 960d7fbf09
Commit: 48b1576980

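The diff below converts TestPIDPressure from a single hardcoded scenario into Go's table-driven test pattern: each case bundles its pods, the PID-usage figures for the pressure and no-pressure phases, and the index of the pod expected to be evicted, and runs in its own t.Run subtest. As a minimal, generic sketch of that pattern (package example, add, and TestAdd are illustrative stand-ins, not code from this commit):

package example

import "testing"

// add is a stand-in for the code under test.
func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
    // Each entry in the table carries its own inputs and expected output.
    testCases := []struct {
        name     string
        a, b     int
        expected int
    }{
        {name: "zeros", a: 0, b: 0, expected: 0},
        {name: "positives", a: 2, b: 3, expected: 5},
    }
    for _, tc := range testCases {
        // t.Run names every case in failure output and allows individual
        // cases to be selected with go test -run.
        t.Run(tc.name, func(t *testing.T) {
            if got := add(tc.a, tc.b); got != tc.expected {
                t.Fatalf("add(%d, %d) = %d, want %d", tc.a, tc.b, got, tc.expected)
            }
        })
    }
}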

@@ -169,11 +169,6 @@ func makePIDStats(nodeAvailablePIDs string, numberOfRunningProcesses string, pod
                MaxPID: &availablePIDs,
                NumOfRunningProcesses: &NumberOfRunningProcesses,
            },
-           SystemContainers: []statsapi.ContainerStats{
-               {
-                   Name: statsapi.SystemContainerPods,
-               },
-           },
        },
        Pods: []statsapi.PodStats{},
    }
@@ -402,8 +397,8 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
    podMaker := makePodWithPIDStats
    summaryStatsMaker := makePIDStats
    podsToMake := []podToMake{
-       {name: "below-requests"},
-       {name: "above-requests"},
+       {},
+       {},
    }
    pods := []*v1.Pod{}
    podStats := map[*v1.Pod]statsapi.PodStats{}
@@ -924,26 +919,47 @@ func makeContainersByQOS(class v1.PodQOSClass) []v1.Container {
}
func TestPIDPressure(t *testing.T) {
-   podMaker := makePodWithPIDStats
-   summaryStatsMaker := makePIDStats
-   podsToMake := []podToMake{
+   // Define test cases
+   testCases := []struct {
+       name string
+       podsToMake []podToMake
+       evictPodIndex int
+       noPressurePIDUsage string
+       pressurePIDUsageWithGracePeriod string
+       pressurePIDUsageWithoutGracePeriod string
+       totalPID string
+   }{
+       {
+           name: "eviction due to pid pressure",
+           podsToMake: []podToMake{
+               {name: "high-priority-high-usage", priority: highPriority, pidUsage: 900},
+               {name: "default-priority-low-usage", priority: defaultPriority, pidUsage: 100},
+               {name: "default-priority-medium-usage", priority: defaultPriority, pidUsage: 400},
+               {name: "low-priority-high-usage", priority: lowPriority, pidUsage: 600},
+               {name: "low-priority-low-usage", priority: lowPriority, pidUsage: 50},
+           },
+           evictPodIndex: 3, // we expect the low-priority-high-usage pod to be evicted
+           noPressurePIDUsage: "300",
+           pressurePIDUsageWithGracePeriod: "700",
+           pressurePIDUsageWithoutGracePeriod: "1200",
+           totalPID: "2000",
+       },
+   }
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           podMaker := makePodWithPIDStats
+           summaryStatsMaker := makePIDStats
            pods := []*v1.Pod{}
            podStats := map[*v1.Pod]statsapi.PodStats{}
-           for _, podToMake := range podsToMake {
+           for _, podToMake := range tc.podsToMake {
                pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.pidUsage)
                pods = append(pods, pod)
                podStats[pod] = podStat
            }
-           podToEvict := pods[3]
-           activePodsFunc := func() []*v1.Pod {
-               return pods
-           }
+           podToEvict := pods[tc.evictPodIndex]
+           activePodsFunc := func() []*v1.Pod { return pods }
            fakeClock := testingclock.NewFakeClock(time.Now())
            podKiller := &mockPodKiller{}
@@ -972,7 +988,8 @@ func TestPIDPressure(t *testing.T) {
                    },
                },
            }
-           summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2000", "300", podStats)}
+           summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)}
            manager := &managerImpl{
                clock: fakeClock,
                killPodFunc: podKiller.killPodNow,
@@ -986,7 +1003,7 @@ func TestPIDPressure(t *testing.T) {
                thresholdsFirstObservedAt: thresholdsObservedAt{},
            }
-           // create a best effort pod to test admission
+           // create a pod to test admission
            podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, 50)
            // synchronize
@@ -996,9 +1013,9 @@ func TestPIDPressure(t *testing.T) {
                t.Fatalf("Manager expects no error but got %v", err)
            }
-           // we should not have disk pressure
-           if manager.IsUnderDiskPressure() {
-               t.Fatalf("Manager should not report disk pressure")
+           // we should not have PID pressure
+           if manager.IsUnderPIDPressure() {
+               t.Fatalf("Manager should not report PID pressure")
            }
            // try to admit our pod (should succeed)
@@ -1008,7 +1025,7 @@ func TestPIDPressure(t *testing.T) {
            // induce soft threshold for PID pressure
            fakeClock.Step(1 * time.Minute)
-           summaryProvider.result = summaryStatsMaker("2000", "700", podStats)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.pressurePIDUsageWithGracePeriod, podStats)
            _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
            if err != nil {
@@ -1057,7 +1074,7 @@ func TestPIDPressure(t *testing.T) {
            // remove PID pressure by simulating increased PID availability
            fakeClock.Step(20 * time.Minute)
-           summaryProvider.result = summaryStatsMaker("2000", "300", podStats) // Simulate increased PID availability
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats) // Simulate increased PID availability
            _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
            if err != nil {
@@ -1071,7 +1088,7 @@ func TestPIDPressure(t *testing.T) {
            // re-induce PID pressure
            fakeClock.Step(1 * time.Minute)
-           summaryProvider.result = summaryStatsMaker("2000", "1200", podStats)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.pressurePIDUsageWithoutGracePeriod, podStats)
            _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
            if err != nil {
@@ -1102,7 +1119,7 @@ func TestPIDPressure(t *testing.T) {
            // reduce PID pressure
            fakeClock.Step(1 * time.Minute)
-           summaryProvider.result = summaryStatsMaker("2000", "300", podStats)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)
            podKiller.pod = nil // reset state
            _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
@@ -1127,7 +1144,7 @@ func TestPIDPressure(t *testing.T) {
            // move the clock past the transition period
            fakeClock.Step(5 * time.Minute)
-           summaryProvider.result = summaryStatsMaker("2000", "300", podStats)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)
            _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
            if err != nil {
@@ -1148,6 +1165,8 @@ func TestPIDPressure(t *testing.T) {
            if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
                t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
            }
+       })
+   }
}
func TestAdmitUnderNodeConditions(t *testing.T) {
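With the table in place, an individual scenario can be selected by name through go test's -run flag, e.g. go test -run 'TestPIDPressure$/eviction_due_to_pid_pressure' ./pkg/kubelet/eviction/ (the package path is assumed from the upstream kubernetes/kubernetes layout; Go replaces spaces in subtest names with underscores when matching).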