@@ -59,22 +59,25 @@ func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
 	return m.dedicatedImageFs, nil
 }
 
-func newMockNodeProvider(allocatableCapacity v1.ResourceList) *mockNodeProvider {
-	return &mockNodeProvider{
-		node: v1.Node{
-			Status: v1.NodeStatus{
-				Allocatable: allocatableCapacity,
-			},
-		},
+func newMockCapacityProvider(capacity, reservation v1.ResourceList) *mockCapacityProvider {
+	return &mockCapacityProvider{
+		capacity: capacity,
+		reservation: reservation,
 	}
 }
 
-type mockNodeProvider struct {
-	node v1.Node
+type mockCapacityProvider struct {
+	capacity v1.ResourceList
+	reservation v1.ResourceList
 }
 
-func (m *mockNodeProvider) GetNode() (*v1.Node, error) {
-	return &m.node, nil
+func (m *mockCapacityProvider) GetCapacity() v1.ResourceList {
+	return m.capacity
+}
+
+func (m *mockCapacityProvider) GetNodeAllocatableReservation() v1.ResourceList {
+	return m.reservation
 }
 
 // mockDiskGC is used to simulate invoking image and container garbage collection.
@@ -200,7 +203,7 @@ func TestMemoryPressure(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	imageGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -243,7 +246,7 @@ func TestMemoryPressure(t *testing.T) {
 	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure
 	if manager.IsUnderMemoryPressure() {
@@ -261,7 +264,7 @@ func TestMemoryPressure(t *testing.T) {
 	// induce soft threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -276,7 +279,7 @@ func TestMemoryPressure(t *testing.T) {
 	// step forward in time pass the grace period
 	fakeClock.Step(3 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -301,7 +304,7 @@ func TestMemoryPressure(t *testing.T) {
 	// remove memory pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure
 	if manager.IsUnderMemoryPressure() {
@@ -311,7 +314,7 @@ func TestMemoryPressure(t *testing.T) {
 	// induce memory pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -339,7 +342,7 @@ func TestMemoryPressure(t *testing.T) {
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure (because transition period not yet met)
 	if !manager.IsUnderMemoryPressure() {
@@ -363,7 +366,7 @@ func TestMemoryPressure(t *testing.T) {
 	fakeClock.Step(5 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure (because transition period met)
 	if manager.IsUnderMemoryPressure() {
@@ -418,7 +421,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -461,7 +464,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", ""), "0Gi", "0Gi", "0Gi")
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -476,7 +479,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	// induce soft threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -491,7 +494,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	// step forward in time pass the grace period
 	fakeClock.Step(3 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -516,7 +519,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	// remove disk pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -526,7 +529,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	// induce disk pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("500Mi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -551,7 +554,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure (because transition period not yet met)
 	if !manager.IsUnderDiskPressure() {
@@ -572,7 +575,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	fakeClock.Step(5 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure (because transition period met)
 	if manager.IsUnderDiskPressure() {
@@ -617,7 +620,7 @@ func TestMinReclaim(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -652,7 +655,7 @@ func TestMinReclaim(t *testing.T) {
 	}
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure
 	if manager.IsUnderMemoryPressure() {
@@ -662,7 +665,7 @@ func TestMinReclaim(t *testing.T) {
 	// induce memory pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -682,7 +685,7 @@ func TestMinReclaim(t *testing.T) {
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1.2Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure (because transition period not yet met)
 	if !manager.IsUnderMemoryPressure() {
@@ -702,7 +705,7 @@ func TestMinReclaim(t *testing.T) {
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure (because transition period not yet met)
 	if !manager.IsUnderMemoryPressure() {
@@ -718,7 +721,7 @@ func TestMinReclaim(t *testing.T) {
 	fakeClock.Step(5 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure (because transition period met)
 	if manager.IsUnderMemoryPressure() {
@@ -757,7 +760,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	imageGcFree := resource.MustParse("700Mi")
 	diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
@@ -793,7 +796,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	}
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -803,7 +806,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	// induce hard threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker(".9Gi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -827,7 +830,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	// remove disk pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -837,7 +840,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	// induce disk pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -864,7 +867,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	diskGC.imageGCInvoked = false // reset state
 	diskGC.containerGCInvoked = false // reset state
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure (because transition period not yet met)
 	if !manager.IsUnderDiskPressure() {
@@ -887,7 +890,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	diskGC.imageGCInvoked = false // reset state
 	diskGC.containerGCInvoked = false // reset state
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure (because transition period met)
 	if manager.IsUnderDiskPressure() {
@@ -955,7 +958,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -998,7 +1001,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", ""), "0", "0", "0")
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -1013,7 +1016,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	// induce soft threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -1028,7 +1031,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	// step forward in time pass the grace period
 	fakeClock.Step(3 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -1053,7 +1056,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	// remove inode pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure
 	if manager.IsUnderDiskPressure() {
@@ -1063,7 +1066,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	// induce inode pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure
 	if !manager.IsUnderDiskPressure() {
@@ -1088,7 +1091,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have disk pressure (because transition period not yet met)
 	if !manager.IsUnderDiskPressure() {
@@ -1109,7 +1112,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	fakeClock.Step(5 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have disk pressure (because transition period met)
 	if manager.IsUnderDiskPressure() {
@@ -1157,7 +1160,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{
 		Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "",
@@ -1203,7 +1206,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	// induce soft threshold
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -1218,7 +1221,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	// step forward in time pass the grace period
 	fakeClock.Step(3 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -1236,7 +1239,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	// remove memory pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Gi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure
 	if manager.IsUnderMemoryPressure() {
@@ -1249,7 +1252,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	// induce memory pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -1290,7 +1293,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
-	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
+	capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
 	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
@@ -1326,7 +1329,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")
 
 	// synchronize
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure
 	if manager.IsUnderMemoryPressure() {
@@ -1346,7 +1349,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	pod, podStat := podMaker("guaranteed-high-2", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi"), "1Gi")
 	podStats[pod] = podStat
 	summaryProvider.result = summaryStatsMaker(constantCapacity, podStats)
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure
 	if !manager.IsUnderMemoryPressure() {
@@ -1382,7 +1385,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	}
 	summaryProvider.result = summaryStatsMaker(constantCapacity, podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should have memory pressure (because transition period not yet met)
 	if !manager.IsUnderMemoryPressure() {
@@ -1406,7 +1409,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	fakeClock.Step(5 * time.Minute)
 	summaryProvider.result = summaryStatsMaker(constantCapacity, podStats)
 	podKiller.pod = nil // reset state
-	manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider)
+	manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
 
 	// we should not have memory pressure (because transition period met)
 	if manager.IsUnderMemoryPressure() {