|
|
|
@@ -1428,3 +1428,295 @@ func TestAllocatableMemoryPressure(t *testing.T) {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// TestAllocatableNodeFsPressure verifies eviction-manager behavior when the
// allocatable nodefs signal crosses its threshold: pressure is reported, the
// lowest-priority pod is evicted, admission is rejected while under pressure,
// and the condition clears only after the transition period elapses.
|
|
|
|
|
func TestAllocatableNodeFsPressure(t *testing.T) {
|
|
|
|
|
podMaker := makePodWithDiskStats
|
|
|
|
|
summaryStatsMaker := makeDiskStats
|
|
|
|
|
|
|
|
|
|
podsToMake := []podToMake{
|
|
|
|
|
{name: "guaranteed-low", requests: newEphemeralStorageResourceList("200Mi", "100m", "1Gi"), limits: newEphemeralStorageResourceList("200Mi", "100m", "1Gi"), rootFsUsed: "200Mi"},
|
|
|
|
|
{name: "guaranteed-high", requests: newEphemeralStorageResourceList("800Mi", "100m", "1Gi"), limits: newEphemeralStorageResourceList("800Mi", "100m", "1Gi"), rootFsUsed: "800Mi"},
|
|
|
|
|
{name: "burstable-low", requests: newEphemeralStorageResourceList("300Mi", "100m", "100Mi"), limits: newEphemeralStorageResourceList("300Mi", "200m", "1Gi"), logsFsUsed: "300Mi"},
|
|
|
|
|
{name: "burstable-high", requests: newEphemeralStorageResourceList("800Mi", "100m", "100Mi"), limits: newEphemeralStorageResourceList("800Mi", "200m", "1Gi"), rootFsUsed: "800Mi"},
|
|
|
|
|
{name: "best-effort-low", requests: newEphemeralStorageResourceList("300Mi", "", ""), limits: newEphemeralStorageResourceList("300Mi", "", ""), logsFsUsed: "300Mi"},
|
|
|
|
|
{name: "best-effort-high", requests: newEphemeralStorageResourceList("800Mi", "", ""), limits: newEphemeralStorageResourceList("800Mi", "", ""), rootFsUsed: "800Mi"},
|
|
|
|
|
}
|
|
|
|
|
pods := []*v1.Pod{}
|
|
|
|
|
podStats := map[*v1.Pod]statsapi.PodStats{}
|
|
|
|
|
for _, podToMake := range podsToMake {
|
|
|
|
|
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
|
|
|
|
|
pods = append(pods, pod)
|
|
|
|
|
podStats[pod] = podStat
|
|
|
|
|
}
|
|
|
|
|
podToEvict := pods[5]
|
|
|
|
|
activePodsFunc := func() []*v1.Pod {
|
|
|
|
|
return pods
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fakeClock := clock.NewFakeClock(time.Now())
|
|
|
|
|
podKiller := &mockPodKiller{}
|
|
|
|
|
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
|
|
|
|
|
capacityProvider := newMockCapacityProvider(newEphemeralStorageResourceList("6Gi", "1000m", "10Gi"), newEphemeralStorageResourceList("1Gi", "1000m", "10Gi"))
|
|
|
|
|
diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
|
|
|
|
|
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
|
|
|
|
|
|
|
|
|
|
config := Config{
|
|
|
|
|
MaxPodGracePeriodSeconds: 5,
|
|
|
|
|
PressureTransitionPeriod: time.Minute * 5,
|
|
|
|
|
Thresholds: []evictionapi.Threshold{
|
|
|
|
|
{
|
|
|
|
|
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
|
|
|
|
Operator: evictionapi.OpLessThan,
|
|
|
|
|
Value: evictionapi.ThresholdValue{
|
|
|
|
|
Quantity: quantityMustParse("1Ki"),
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Gi", "6Gi", podStats)}
|
|
|
|
|
manager := &managerImpl{
|
|
|
|
|
clock: fakeClock,
|
|
|
|
|
killPodFunc: podKiller.killPodNow,
|
|
|
|
|
imageGC: diskGC,
|
|
|
|
|
containerGC: diskGC,
|
|
|
|
|
config: config,
|
|
|
|
|
recorder: &record.FakeRecorder{},
|
|
|
|
|
summaryProvider: summaryProvider,
|
|
|
|
|
nodeRef: nodeRef,
|
|
|
|
|
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
|
|
|
|
|
thresholdsFirstObservedAt: thresholdsObservedAt{},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// create a best effort pod to test admission
|
|
|
|
|
bestEffortPodToAdmit, _ := podMaker("best-admit", newEphemeralStorageResourceList("", "", ""), newEphemeralStorageResourceList("", "", ""), "0Gi", "", "")
|
|
|
|
|
burstablePodToAdmit, _ := podMaker("burst-admit", newEphemeralStorageResourceList("1Gi", "", ""), newEphemeralStorageResourceList("1Gi", "", ""), "1Gi", "", "")
|
|
|
|
|
|
|
|
|
|
// synchronize
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should not have disk pressure
|
|
|
|
|
if manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should not report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// try to admit our pods (they should succeed)
|
|
|
|
|
expected := []bool{true, true}
|
|
|
|
|
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
|
|
|
|
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
|
|
|
|
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// induce disk pressure!
|
|
|
|
|
fakeClock.Step(1 * time.Minute)
|
|
|
|
|
pod, podStat := podMaker("guaranteed-high-2", newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), "2000Mi", "", "")
|
|
|
|
|
podStats[pod] = podStat
|
|
|
|
|
pods = append(pods, pod)
|
|
|
|
|
summaryProvider.result = summaryStatsMaker("6Gi", "6Gi", podStats)
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should have disk pressure
|
|
|
|
|
if !manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// check the right pod was killed
|
|
|
|
|
if podKiller.pod != podToEvict {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, podToEvict.Name)
|
|
|
|
|
}
|
|
|
|
|
observedGracePeriod := *podKiller.gracePeriodOverride
|
|
|
|
|
if observedGracePeriod != int64(0) {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
|
|
|
|
|
}
|
|
|
|
|
// reset state
|
|
|
|
|
podKiller.pod = nil
|
|
|
|
|
podKiller.gracePeriodOverride = nil
|
|
|
|
|
|
|
|
|
|
// try to admit our pod (should fail)
|
|
|
|
|
expected = []bool{false, false}
|
|
|
|
|
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
|
|
|
|
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
|
|
|
|
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// reduce disk pressure
|
|
|
|
|
fakeClock.Step(1 * time.Minute)
|
|
|
|
|
pods[5] = pods[len(pods)-1]
|
|
|
|
|
pods = pods[:len(pods)-1]
|
|
|
|
|
|
|
|
|
|
// we should have disk pressure (because transition period not yet met)
|
|
|
|
|
if !manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// try to admit our pod (should fail)
|
|
|
|
|
expected = []bool{false, false}
|
|
|
|
|
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
|
|
|
|
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
|
|
|
|
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// move the clock past transition period to ensure that we stop reporting pressure
|
|
|
|
|
fakeClock.Step(5 * time.Minute)
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should not have disk pressure (because transition period met)
|
|
|
|
|
if manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should not report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// no pod should have been killed
|
|
|
|
|
if podKiller.pod != nil {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// all pods should admit now
|
|
|
|
|
expected = []bool{true, true}
|
|
|
|
|
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
|
|
|
|
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
|
|
|
|
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func TestNodeReclaimForAllocatableFuncs(t *testing.T) {
|
|
|
|
|
podMaker := makePodWithDiskStats
|
|
|
|
|
summaryStatsMaker := makeDiskStats
|
|
|
|
|
podsToMake := []podToMake{
|
|
|
|
|
{name: "guaranteed-low", requests: newEphemeralStorageResourceList("200Mi", "100m", "1Gi"), limits: newEphemeralStorageResourceList("200Mi", "100m", "1Gi"), rootFsUsed: "200Mi"},
|
|
|
|
|
{name: "guaranteed-high", requests: newEphemeralStorageResourceList("800Mi", "100m", "1Gi"), limits: newEphemeralStorageResourceList("800Mi", "100m", "1Gi"), rootFsUsed: "800Mi"},
|
|
|
|
|
{name: "burstable-low", requests: newEphemeralStorageResourceList("300Mi", "100m", "100Mi"), limits: newEphemeralStorageResourceList("300Mi", "200m", "1Gi"), logsFsUsed: "300Mi"},
|
|
|
|
|
{name: "burstable-high", requests: newEphemeralStorageResourceList("800Mi", "100m", "100Mi"), limits: newEphemeralStorageResourceList("800Mi", "200m", "1Gi"), rootFsUsed: "800Mi"},
|
|
|
|
|
{name: "best-effort-low", requests: newEphemeralStorageResourceList("300Mi", "", ""), limits: newEphemeralStorageResourceList("300Mi", "", ""), logsFsUsed: "300Mi"},
|
|
|
|
|
{name: "best-effort-high", requests: newEphemeralStorageResourceList("800Mi", "", ""), limits: newEphemeralStorageResourceList("800Mi", "", ""), rootFsUsed: "800Mi"},
|
|
|
|
|
}
|
|
|
|
|
pods := []*v1.Pod{}
|
|
|
|
|
podStats := map[*v1.Pod]statsapi.PodStats{}
|
|
|
|
|
for _, podToMake := range podsToMake {
|
|
|
|
|
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
|
|
|
|
|
pods = append(pods, pod)
|
|
|
|
|
podStats[pod] = podStat
|
|
|
|
|
}
|
|
|
|
|
podToEvict := pods[5]
|
|
|
|
|
activePodsFunc := func() []*v1.Pod {
|
|
|
|
|
return pods
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fakeClock := clock.NewFakeClock(time.Now())
|
|
|
|
|
podKiller := &mockPodKiller{}
|
|
|
|
|
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
|
|
|
|
|
capacityProvider := newMockCapacityProvider(newEphemeralStorageResourceList("6Gi", "1000m", "10Gi"), newEphemeralStorageResourceList("1Gi", "1000m", "10Gi"))
|
|
|
|
|
imageGcFree := resource.MustParse("800Mi")
|
|
|
|
|
diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil}
|
|
|
|
|
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
|
|
|
|
|
|
|
|
|
|
config := Config{
|
|
|
|
|
MaxPodGracePeriodSeconds: 5,
|
|
|
|
|
PressureTransitionPeriod: time.Minute * 5,
|
|
|
|
|
Thresholds: []evictionapi.Threshold{
|
|
|
|
|
{
|
|
|
|
|
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
|
|
|
|
Operator: evictionapi.OpLessThan,
|
|
|
|
|
Value: evictionapi.ThresholdValue{
|
|
|
|
|
Quantity: quantityMustParse("10Mi"),
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("6Gi", "6Gi", podStats)}
|
|
|
|
|
manager := &managerImpl{
|
|
|
|
|
clock: fakeClock,
|
|
|
|
|
killPodFunc: podKiller.killPodNow,
|
|
|
|
|
imageGC: diskGC,
|
|
|
|
|
containerGC: diskGC,
|
|
|
|
|
config: config,
|
|
|
|
|
recorder: &record.FakeRecorder{},
|
|
|
|
|
summaryProvider: summaryProvider,
|
|
|
|
|
nodeRef: nodeRef,
|
|
|
|
|
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
|
|
|
|
|
thresholdsFirstObservedAt: thresholdsObservedAt{},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// synchronize
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should not have disk pressure
|
|
|
|
|
if manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Errorf("Manager should not report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// induce hard threshold
|
|
|
|
|
fakeClock.Step(1 * time.Minute)
|
|
|
|
|
|
|
|
|
|
pod, podStat := podMaker("guaranteed-high-2", newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), "2000Mi", "", "")
|
|
|
|
|
podStats[pod] = podStat
|
|
|
|
|
pods = append(pods, pod)
|
|
|
|
|
summaryProvider.result = summaryStatsMaker("6Gi", "6Gi", podStats)
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should have disk pressure
|
|
|
|
|
if !manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should report disk pressure since soft threshold was met")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// verify image gc was invoked
|
|
|
|
|
if !diskGC.imageGCInvoked || !diskGC.containerGCInvoked {
|
|
|
|
|
t.Fatalf("Manager should have invoked image gc")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// verify no pod was killed because image gc was sufficient
|
|
|
|
|
if podKiller.pod == nil {
|
|
|
|
|
t.Fatalf("Manager should have killed a pod, but not killed")
|
|
|
|
|
}
|
|
|
|
|
// check the right pod was killed
|
|
|
|
|
if podKiller.pod != podToEvict {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, podToEvict.Name)
|
|
|
|
|
}
|
|
|
|
|
observedGracePeriod := *podKiller.gracePeriodOverride
|
|
|
|
|
if observedGracePeriod != int64(0) {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// reset state
|
|
|
|
|
diskGC.imageGCInvoked = false
|
|
|
|
|
diskGC.containerGCInvoked = false
|
|
|
|
|
podKiller.pod = nil
|
|
|
|
|
podKiller.gracePeriodOverride = nil
|
|
|
|
|
|
|
|
|
|
// reduce disk pressure
|
|
|
|
|
fakeClock.Step(1 * time.Minute)
|
|
|
|
|
pods[5] = pods[len(pods)-1]
|
|
|
|
|
pods = pods[:len(pods)-1]
|
|
|
|
|
|
|
|
|
|
// we should have disk pressure (because transition period not yet met)
|
|
|
|
|
if !manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// move the clock past transition period to ensure that we stop reporting pressure
|
|
|
|
|
fakeClock.Step(5 * time.Minute)
|
|
|
|
|
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
|
|
|
|
|
|
|
|
|
// we should not have disk pressure (because transition period met)
|
|
|
|
|
if manager.IsUnderDiskPressure() {
|
|
|
|
|
t.Fatalf("Manager should not report disk pressure")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// no pod should have been killed
|
|
|
|
|
if podKiller.pod != nil {
|
|
|
|
|
t.Fatalf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// no image gc should have occurred
|
|
|
|
|
if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
|
|
|
|
|
t.Errorf("Manager chose to perform image gc when it was not neeed")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// no pod should have been killed
|
|
|
|
|
if podKiller.pod != nil {
|
|
|
|
|
t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|