Graduate PodDisruptionConditions to stable
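With the gate graduated to GA and locked to its default, the per-test feature-gate plumbing becomes dead weight: the diff below removes the `enablePodDisruptionConditions` table fields and the matching `SetFeatureGateDuringTest` calls, and the affected tests now expect the `DisruptionTarget` condition unconditionally. A minimal sketch of why the toggle has to go, assuming the standard component-base feature-gate behavior (my illustration, not code from this commit):

```go
// Hypothetical illustration, not part of this commit: once a gate is GA and
// LockToDefault, the test helper can only "set" it to its default value; asking
// for anything else is reported as a test failure, so keeping per-case enable
// knobs for the gate no longer buys any coverage.
package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestLockedGateCannotBeDisabled(t *testing.T) {
	// Setting the locked gate to its default (true) is still allowed and is a no-op.
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)

	// Setting it to false would fail the test, because the gate is locked to true:
	// featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, false)
}
```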
		| @@ -2709,7 +2709,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { | ||||
|  | ||||
| 	testCases := map[string]struct { | ||||
| 		enableJobPodFailurePolicy     bool | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		enableJobPodReplacementPolicy bool | ||||
| 		job                           batch.Job | ||||
| 		pods                          []v1.Pod | ||||
| @@ -3736,8 +3735,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		"terminating Pod not considered failed when PodDisruptionConditions is enabled": { | ||||
| 			enableJobPodFailurePolicy:     true, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			enableJobPodFailurePolicy: true, | ||||
| 			job: batch.Job{ | ||||
| 				TypeMeta:   metav1.TypeMeta{Kind: "Job"}, | ||||
| 				ObjectMeta: validObjectMeta, | ||||
| @@ -3776,7 +3774,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { | ||||
| 	for name, tc := range testCases { | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy) | ||||
|  | ||||
| 			if tc.job.Spec.PodReplacementPolicy == nil { | ||||
|   | ||||
| @@ -31,20 +31,17 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/util/sets" | ||||
| 	"k8s.io/apimachinery/pkg/util/strategicpatch" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	utilfeature "k8s.io/apiserver/pkg/util/feature" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	coreinformers "k8s.io/client-go/informers/core/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	clienttesting "k8s.io/client-go/testing" | ||||
| 	"k8s.io/client-go/util/workqueue" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	metricstestutil "k8s.io/component-base/metrics/testutil" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	"k8s.io/kubernetes/pkg/controller/podgc/metrics" | ||||
| 	"k8s.io/kubernetes/pkg/controller/testutil" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/eviction" | ||||
| 	testingclock "k8s.io/utils/clock/testing" | ||||
| 	"k8s.io/utils/pointer" | ||||
| @@ -69,23 +66,21 @@ func TestGCTerminated(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		name                          string | ||||
| 		pods                          []nameToPhase | ||||
| 		threshold                     int | ||||
| 		deletedPodNames               sets.Set[string] | ||||
| 		patchedPodNames               sets.Set[string] | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		name            string | ||||
| 		pods            []nameToPhase | ||||
| 		threshold       int | ||||
| 		deletedPodNames sets.Set[string] | ||||
| 		patchedPodNames sets.Set[string] | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name: "delete pod a which is PodFailed and pod b which is PodSucceeded; PodDisruptionConditions enabled", | ||||
| 			name: "delete pod a which is PodFailed and pod b which is PodSucceeded", | ||||
| 			pods: []nameToPhase{ | ||||
| 				{name: "a", phase: v1.PodFailed}, | ||||
| 				{name: "b", phase: v1.PodSucceeded}, | ||||
| 				{name: "c", phase: v1.PodFailed}, | ||||
| 			}, | ||||
| 			threshold:                     1, | ||||
| 			deletedPodNames:               sets.New("a", "b"), | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			threshold:       1, | ||||
| 			deletedPodNames: sets.New("a", "b"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name: "threshold = 0, disables terminated pod deletion", | ||||
| @@ -156,7 +151,6 @@ func TestGCTerminated(t *testing.T) { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			resetMetrics() | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			creationTime := time.Unix(0, 0) | ||||
| 			nodes := []*v1.Node{testutil.NewNode("node")} | ||||
|  | ||||
| @@ -206,19 +200,18 @@ func waitForAdded(q workqueue.TypedDelayingInterface[string], depth int) error { | ||||
|  | ||||
| func TestGCOrphaned(t *testing.T) { | ||||
| 	testCases := []struct { | ||||
| 		name                          string | ||||
| 		initialClientNodes            []*v1.Node | ||||
| 		initialInformerNodes          []*v1.Node | ||||
| 		delay                         time.Duration | ||||
| 		addedClientNodes              []*v1.Node | ||||
| 		deletedClientNodes            []*v1.Node | ||||
| 		addedInformerNodes            []*v1.Node | ||||
| 		deletedInformerNodes          []*v1.Node | ||||
| 		pods                          []*v1.Pod | ||||
| 		itemsInQueue                  int | ||||
| 		deletedPodNames               sets.Set[string] | ||||
| 		patchedPodNames               sets.Set[string] | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		name                 string | ||||
| 		initialClientNodes   []*v1.Node | ||||
| 		initialInformerNodes []*v1.Node | ||||
| 		delay                time.Duration | ||||
| 		addedClientNodes     []*v1.Node | ||||
| 		deletedClientNodes   []*v1.Node | ||||
| 		addedInformerNodes   []*v1.Node | ||||
| 		deletedInformerNodes []*v1.Node | ||||
| 		pods                 []*v1.Pod | ||||
| 		itemsInQueue         int | ||||
| 		deletedPodNames      sets.Set[string] | ||||
| 		patchedPodNames      sets.Set[string] | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name: "nodes present in lister", | ||||
| @@ -259,17 +252,16 @@ func TestGCOrphaned(t *testing.T) { | ||||
| 			deletedPodNames: sets.New("a", "b"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:  "no nodes with PodDisruptionConditions enabled", | ||||
| 			name:  "no nodes, one running pod", | ||||
| 			delay: 2 * quarantineTime, | ||||
| 			pods: []*v1.Pod{ | ||||
| 				makePod("a", "deleted", v1.PodFailed), | ||||
| 				makePod("b", "deleted", v1.PodSucceeded), | ||||
| 				makePod("c", "deleted", v1.PodRunning), | ||||
| 			}, | ||||
| 			itemsInQueue:                  1, | ||||
| 			deletedPodNames:               sets.New("a", "b", "c"), | ||||
| 			patchedPodNames:               sets.New("c"), | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			itemsInQueue:    1, | ||||
| 			deletedPodNames: sets.New("a", "b", "c"), | ||||
| 			patchedPodNames: sets.New("c"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:  "quarantine not finished", | ||||
| @@ -351,7 +343,6 @@ func TestGCOrphaned(t *testing.T) { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			resetMetrics() | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
|  | ||||
| 			client := setupNewSimpleClient(test.initialClientNodes, test.pods) | ||||
| 			gcc, podInformer, nodeInformer := NewFromClient(ctx, client, -1) | ||||
| @@ -416,23 +407,11 @@ func TestGCUnscheduledTerminating(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		name                          string | ||||
| 		pods                          []nameToPhase | ||||
| 		deletedPodNames               sets.Set[string] | ||||
| 		patchedPodNames               sets.Set[string] | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		name            string | ||||
| 		pods            []nameToPhase | ||||
| 		deletedPodNames sets.Set[string] | ||||
| 		patchedPodNames sets.Set[string] | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name: "Unscheduled pod in any phase must be deleted, the phase of the running pod is changed to Failed; PodDisruptionConditions enabled", | ||||
| 			pods: []nameToPhase{ | ||||
| 				{name: "a", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: ""}, | ||||
| 				{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: ""}, | ||||
| 				{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: ""}, | ||||
| 			}, | ||||
| 			deletedPodNames:               sets.New("a", "b", "c"), | ||||
| 			patchedPodNames:               sets.New("c"), | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name: "Unscheduled pod in any phase must be deleted", | ||||
| 			pods: []nameToPhase{ | ||||
| @@ -457,7 +436,6 @@ func TestGCUnscheduledTerminating(t *testing.T) { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			resetMetrics() | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			creationTime := time.Unix(0, 0) | ||||
|  | ||||
| 			pods := make([]*v1.Pod, 0, len(test.pods)) | ||||
| @@ -505,12 +483,11 @@ func TestGCTerminating(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		name                          string | ||||
| 		pods                          []nameToPodConfig | ||||
| 		nodes                         []node | ||||
| 		deletedPodNames               sets.Set[string] | ||||
| 		patchedPodNames               sets.Set[string] | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		name            string | ||||
| 		pods            []nameToPodConfig | ||||
| 		nodes           []node | ||||
| 		deletedPodNames sets.Set[string] | ||||
| 		patchedPodNames sets.Set[string] | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name: "pods have deletion timestamp set and the corresponding nodes are not ready", | ||||
| @@ -592,7 +569,7 @@ func TestGCTerminating(t *testing.T) { | ||||
| 			patchedPodNames: sets.New("b1", "b4", "b5", "b6"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name: "pods deleted from node tained out-of-service; PodDisruptionConditions enabled", | ||||
| 			name: "pods deleted from node tainted out-of-service", | ||||
| 			nodes: []node{ | ||||
| 				{name: "worker", readyCondition: v1.ConditionFalse, taints: []v1.Taint{{Key: v1.TaintNodeOutOfService, | ||||
| 					Effect: v1.TaintEffectNoExecute}}}, | ||||
| @@ -602,16 +579,14 @@ func TestGCTerminating(t *testing.T) { | ||||
| 				{name: "b", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: "worker"}, | ||||
| 				{name: "c", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: "worker"}, | ||||
| 			}, | ||||
| 			deletedPodNames:               sets.New("a", "b", "c"), | ||||
| 			patchedPodNames:               sets.New("a"), | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			deletedPodNames: sets.New("a", "b", "c"), | ||||
| 			patchedPodNames: sets.New("a"), | ||||
| 		}, | ||||
| 	} | ||||
| 	for _, test := range testCases { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			resetMetrics() | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
|  | ||||
| 			creationTime := time.Unix(0, 0) | ||||
| 			nodes := make([]*v1.Node, 0, len(test.nodes)) | ||||
| @@ -720,7 +695,6 @@ func TestGCInspectingPatchedPodBeforeDeletion(t *testing.T) { | ||||
| 	for _, test := range testCases { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true) | ||||
|  | ||||
| 			pods := []*v1.Pod{test.pod} | ||||
|  | ||||
|   | ||||
| @@ -32,14 +32,11 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/labels" | ||||
| 	"k8s.io/apimachinery/pkg/types" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	"k8s.io/apiserver/pkg/util/feature" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	clienttesting "k8s.io/client-go/testing" | ||||
| 	"k8s.io/client-go/tools/cache" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/testutil" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| ) | ||||
|  | ||||
| var timeForControllerToProgressForSanityCheck = 20 * time.Millisecond | ||||
| @@ -139,12 +136,11 @@ func TestFilterNoExecuteTaints(t *testing.T) { | ||||
|  | ||||
| func TestCreatePod(t *testing.T) { | ||||
| 	testCases := []struct { | ||||
| 		description                   string | ||||
| 		pod                           *corev1.Pod | ||||
| 		taintedNodes                  map[string][]corev1.Taint | ||||
| 		expectPatch                   bool | ||||
| 		expectDelete                  bool | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		description  string | ||||
| 		pod          *corev1.Pod | ||||
| 		taintedNodes map[string][]corev1.Taint | ||||
| 		expectPatch  bool | ||||
| 		expectDelete bool | ||||
| 	}{ | ||||
| 		{ | ||||
| 			description:  "not scheduled - ignore", | ||||
| @@ -164,18 +160,9 @@ func TestCreatePod(t *testing.T) { | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			description: "schedule on tainted Node; PodDisruptionConditions enabled", | ||||
| 			pod:         testutil.NewPod("pod1", "node1"), | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:                   true, | ||||
| 			expectDelete:                  true, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			description: "schedule on tainted Node with finite toleration", | ||||
| 			pod:         addToleration(testutil.NewPod("pod1", "node1"), 1, 100), | ||||
| @@ -198,13 +185,13 @@ func TestCreatePod(t *testing.T) { | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, item := range testCases { | ||||
| 		t.Run(item.description, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions) | ||||
| 			ctx, cancel := context.WithCancel(context.Background()) | ||||
| 			fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.pod}}) | ||||
| 			controller, podIndexer, _ := setupNewController(ctx, fakeClientset) | ||||
| @@ -240,27 +227,15 @@ func TestDeletePod(t *testing.T) { | ||||
|  | ||||
| func TestUpdatePod(t *testing.T) { | ||||
| 	testCases := []struct { | ||||
| 		description                   string | ||||
| 		prevPod                       *corev1.Pod | ||||
| 		awaitForScheduledEviction     bool | ||||
| 		newPod                        *corev1.Pod | ||||
| 		taintedNodes                  map[string][]corev1.Taint | ||||
| 		expectPatch                   bool | ||||
| 		expectDelete                  bool | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		skipOnWindows                 bool | ||||
| 		description               string | ||||
| 		prevPod                   *corev1.Pod | ||||
| 		awaitForScheduledEviction bool | ||||
| 		newPod                    *corev1.Pod | ||||
| 		taintedNodes              map[string][]corev1.Taint | ||||
| 		expectPatch               bool | ||||
| 		expectDelete              bool | ||||
| 		skipOnWindows             bool | ||||
| 	}{ | ||||
| 		{ | ||||
| 			description: "scheduling onto tainted Node results in patch and delete when PodDisruptionConditions enabled", | ||||
| 			prevPod:     testutil.NewPod("pod1", ""), | ||||
| 			newPod:      testutil.NewPod("pod1", "node1"), | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:                   true, | ||||
| 			expectDelete:                  true, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			description: "scheduling onto tainted Node", | ||||
| 			prevPod:     testutil.NewPod("pod1", ""), | ||||
| @@ -268,6 +243,7 @@ func TestUpdatePod(t *testing.T) { | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 		{ | ||||
| @@ -287,6 +263,7 @@ func TestUpdatePod(t *testing.T) { | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 		{ | ||||
| @@ -297,6 +274,7 @@ func TestUpdatePod(t *testing.T) { | ||||
| 			taintedNodes: map[string][]corev1.Taint{ | ||||
| 				"node1": {createNoExecuteTaint(1)}, | ||||
| 			}, | ||||
| 			expectPatch:   true, | ||||
| 			expectDelete:  true, | ||||
| 			skipOnWindows: true, | ||||
| 		}, | ||||
| @@ -308,7 +286,6 @@ func TestUpdatePod(t *testing.T) { | ||||
| 				// TODO: remove skip once the flaking test has been fixed. | ||||
| 				t.Skip("Skip flaking test on Windows.") | ||||
| 			} | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions) | ||||
| 			ctx, cancel := context.WithCancel(context.Background()) | ||||
| 			fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.prevPod}}) | ||||
| 			controller, podIndexer, _ := setupNewController(context.TODO(), fakeClientset) | ||||
| @@ -417,33 +394,22 @@ func TestDeleteNode(t *testing.T) { | ||||
|  | ||||
| func TestUpdateNode(t *testing.T) { | ||||
| 	testCases := []struct { | ||||
| 		description                   string | ||||
| 		pods                          []corev1.Pod | ||||
| 		oldNode                       *corev1.Node | ||||
| 		newNode                       *corev1.Node | ||||
| 		expectPatch                   bool | ||||
| 		expectDelete                  bool | ||||
| 		additionalSleep               time.Duration | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		description     string | ||||
| 		pods            []corev1.Pod | ||||
| 		oldNode         *corev1.Node | ||||
| 		newNode         *corev1.Node | ||||
| 		expectPatch     bool | ||||
| 		expectDelete    bool | ||||
| 		additionalSleep time.Duration | ||||
| 	}{ | ||||
| 		{ | ||||
| 			description: "Added taint, expect node patched and deleted when PodDisruptionConditions is enabled", | ||||
| 			pods: []corev1.Pod{ | ||||
| 				*testutil.NewPod("pod1", "node1"), | ||||
| 			}, | ||||
| 			oldNode:                       testutil.NewNode("node1"), | ||||
| 			newNode:                       addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), | ||||
| 			expectPatch:                   true, | ||||
| 			expectDelete:                  true, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			description: "Added taint", | ||||
| 			description: "Added taint, expect node patched and deleted", | ||||
| 			pods: []corev1.Pod{ | ||||
| 				*testutil.NewPod("pod1", "node1"), | ||||
| 			}, | ||||
| 			oldNode:      testutil.NewNode("node1"), | ||||
| 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 		{ | ||||
| @@ -462,6 +428,7 @@ func TestUpdateNode(t *testing.T) { | ||||
| 			}, | ||||
| 			oldNode:      testutil.NewNode("node1"), | ||||
| 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}), | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 		{ | ||||
| @@ -501,13 +468,13 @@ func TestUpdateNode(t *testing.T) { | ||||
| 			}, | ||||
| 			oldNode:      testutil.NewNode("node1"), | ||||
| 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}), | ||||
| 			expectPatch:  true, | ||||
| 			expectDelete: true, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, item := range testCases { | ||||
| 		t.Run(item.description, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions) | ||||
| 			ctx, cancel := context.WithCancel(context.Background()) | ||||
| 			defer cancel() | ||||
|  | ||||
|   | ||||
| @@ -566,6 +566,7 @@ const ( | ||||
| 	// kep: https://kep.k8s.io/3329 | ||||
| 	// alpha: v1.25 | ||||
| 	// beta: v1.26 | ||||
| 	// stable: v1.31 | ||||
| 	// | ||||
| 	// Enables support for appending a dedicated pod condition indicating that | ||||
| 	// the pod is being deleted due to a disruption. | ||||
| @@ -1115,7 +1116,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS | ||||
|  | ||||
| 	PodDeletionCost: {Default: true, PreRelease: featuregate.Beta}, | ||||
|  | ||||
| 	PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta}, | ||||
| 	PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 | ||||
|  | ||||
| 	PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta}, | ||||
|  | ||||
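For context, a rough sketch of what the new entry means, paraphrasing the component-base featuregate types rather than quoting this commit: GA plus LockToDefault makes the gate effectively always on, an override such as `--feature-gates=PodDisruptionConditions=false` is rejected at component startup, and the "remove in 1.33" note marks when the now-inert map entry is expected to be dropped entirely.

```go
// Rough sketch (my illustration); the real declaration lives in the
// defaultKubernetesFeatureGates map shown in the hunk above.
package example

import "k8s.io/component-base/featuregate"

var podDisruptionConditionsSpec = featuregate.FeatureSpec{
	Default:       true,           // the disruption condition is always appended
	PreRelease:    featuregate.GA, // graduated in v1.31
	LockToDefault: true,           // overrides back to false are refused
}
```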
|   | ||||
| @@ -271,14 +271,7 @@ func TestMemoryPressure_VerifyPodStatus(t *testing.T) { | ||||
| 	testCases := map[string]struct { | ||||
| 		wantPodStatus v1.PodStatus | ||||
| 	}{ | ||||
| 		"eviction due to memory pressure; no image fs": { | ||||
| 			wantPodStatus: v1.PodStatus{ | ||||
| 				Phase:   v1.PodFailed, | ||||
| 				Reason:  "Evicted", | ||||
| 				Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ", | ||||
| 			}, | ||||
| 		}, | ||||
| 		"eviction due to memory pressure; image fs": { | ||||
| 		"eviction due to memory pressure": { | ||||
| 			wantPodStatus: v1.PodStatus{ | ||||
| 				Phase:   v1.PodFailed, | ||||
| 				Reason:  "Evicted", | ||||
| @@ -286,92 +279,84 @@ func TestMemoryPressure_VerifyPodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	for name, tc := range testCases { | ||||
| 		for _, enablePodDisruptionConditions := range []bool{false, true} { | ||||
| 			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) { | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions) | ||||
| 	for _, tc := range testCases { | ||||
| 		podMaker := makePodWithMemoryStats | ||||
| 		summaryStatsMaker := makeMemoryStats | ||||
| 		podsToMake := []podToMake{ | ||||
| 			{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"}, | ||||
| 			{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"}, | ||||
| 		} | ||||
| 		pods := []*v1.Pod{} | ||||
| 		podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 		for _, podToMake := range podsToMake { | ||||
| 			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) | ||||
| 			pods = append(pods, pod) | ||||
| 			podStats[pod] = podStat | ||||
| 		} | ||||
| 		activePodsFunc := func() []*v1.Pod { | ||||
| 			return pods | ||||
| 		} | ||||
|  | ||||
| 				podMaker := makePodWithMemoryStats | ||||
| 				summaryStatsMaker := makeMemoryStats | ||||
| 				podsToMake := []podToMake{ | ||||
| 					{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"}, | ||||
| 					{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"}, | ||||
| 				} | ||||
| 				pods := []*v1.Pod{} | ||||
| 				podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 				for _, podToMake := range podsToMake { | ||||
| 					pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) | ||||
| 					pods = append(pods, pod) | ||||
| 					podStats[pod] = podStat | ||||
| 				} | ||||
| 				activePodsFunc := func() []*v1.Pod { | ||||
| 					return pods | ||||
| 				} | ||||
| 		fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 		podKiller := &mockPodKiller{} | ||||
| 		diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)} | ||||
| 		diskGC := &mockDiskGC{err: nil} | ||||
| 		nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
|  | ||||
| 				fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 				podKiller := &mockPodKiller{} | ||||
| 				diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)} | ||||
| 				diskGC := &mockDiskGC{err: nil} | ||||
| 				nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
|  | ||||
| 				config := Config{ | ||||
| 					PressureTransitionPeriod: time.Minute * 5, | ||||
| 					Thresholds: []evictionapi.Threshold{ | ||||
| 						{ | ||||
| 							Signal:   evictionapi.SignalMemoryAvailable, | ||||
| 							Operator: evictionapi.OpLessThan, | ||||
| 							Value: evictionapi.ThresholdValue{ | ||||
| 								Quantity: quantityMustParse("2Gi"), | ||||
| 							}, | ||||
| 						}, | ||||
| 		config := Config{ | ||||
| 			PressureTransitionPeriod: time.Minute * 5, | ||||
| 			Thresholds: []evictionapi.Threshold{ | ||||
| 				{ | ||||
| 					Signal:   evictionapi.SignalMemoryAvailable, | ||||
| 					Operator: evictionapi.OpLessThan, | ||||
| 					Value: evictionapi.ThresholdValue{ | ||||
| 						Quantity: quantityMustParse("2Gi"), | ||||
| 					}, | ||||
| 				} | ||||
| 				summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)} | ||||
| 				manager := &managerImpl{ | ||||
| 					clock:                        fakeClock, | ||||
| 					killPodFunc:                  podKiller.killPodNow, | ||||
| 					imageGC:                      diskGC, | ||||
| 					containerGC:                  diskGC, | ||||
| 					config:                       config, | ||||
| 					recorder:                     &record.FakeRecorder{}, | ||||
| 					summaryProvider:              summaryProvider, | ||||
| 					nodeRef:                      nodeRef, | ||||
| 					nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 					thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 				} | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 		summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)} | ||||
| 		manager := &managerImpl{ | ||||
| 			clock:                        fakeClock, | ||||
| 			killPodFunc:                  podKiller.killPodNow, | ||||
| 			imageGC:                      diskGC, | ||||
| 			containerGC:                  diskGC, | ||||
| 			config:                       config, | ||||
| 			recorder:                     &record.FakeRecorder{}, | ||||
| 			summaryProvider:              summaryProvider, | ||||
| 			nodeRef:                      nodeRef, | ||||
| 			nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 			thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 		} | ||||
|  | ||||
| 				// synchronize to detect the memory pressure | ||||
| 				_, err := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
| 		// synchronize to detect the memory pressure | ||||
| 		_, err := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
|  | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Manager expects no error but got %v", err) | ||||
| 				} | ||||
| 				// verify memory pressure is detected | ||||
| 				if !manager.IsUnderMemoryPressure() { | ||||
| 					t.Fatalf("Manager should have detected memory pressure") | ||||
| 				} | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Manager expects no error but got %v", err) | ||||
| 		} | ||||
| 		// verify memory pressure is detected | ||||
| 		if !manager.IsUnderMemoryPressure() { | ||||
| 			t.Fatalf("Manager should have detected memory pressure") | ||||
| 		} | ||||
|  | ||||
| 				// verify a pod is selected for eviction | ||||
| 				if podKiller.pod == nil { | ||||
| 					t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 				} | ||||
| 		// verify a pod is selected for eviction | ||||
| 		if podKiller.pod == nil { | ||||
| 			t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 		} | ||||
|  | ||||
| 				wantPodStatus := tc.wantPodStatus.DeepCopy() | ||||
| 				if enablePodDisruptionConditions { | ||||
| 					wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 						Type:    "DisruptionTarget", | ||||
| 						Status:  "True", | ||||
| 						Reason:  "TerminationByKubelet", | ||||
| 						Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ", | ||||
| 					}) | ||||
| 				} | ||||
| 		wantPodStatus := tc.wantPodStatus.DeepCopy() | ||||
| 		wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 			Type:    "DisruptionTarget", | ||||
| 			Status:  "True", | ||||
| 			Reason:  "TerminationByKubelet", | ||||
| 			Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ", | ||||
| 		}) | ||||
|  | ||||
| 				// verify the pod status after applying the status update function | ||||
| 				podKiller.statusFn(&podKiller.pod.Status) | ||||
| 				if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 					t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 				} | ||||
| 			}) | ||||
| 		// verify the pod status after applying the status update function | ||||
| 		podKiller.statusFn(&podKiller.pod.Status) | ||||
| 		if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 			t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -388,93 +373,85 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	for name, tc := range testCases { | ||||
| 		for _, enablePodDisruptionConditions := range []bool{true, false} { | ||||
| 			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) { | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions) | ||||
| 	for _, tc := range testCases { | ||||
| 		podMaker := makePodWithPIDStats | ||||
| 		summaryStatsMaker := makePIDStats | ||||
| 		podsToMake := []podToMake{ | ||||
| 			{name: "pod1", priority: lowPriority, pidUsage: 500}, | ||||
| 			{name: "pod2", priority: defaultPriority, pidUsage: 500}, | ||||
| 		} | ||||
| 		pods := []*v1.Pod{} | ||||
| 		podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 		for _, podToMake := range podsToMake { | ||||
| 			pod, podStat := podMaker(podToMake.name, podToMake.priority, 2) | ||||
| 			pods = append(pods, pod) | ||||
| 			podStats[pod] = podStat | ||||
| 		} | ||||
| 		activePodsFunc := func() []*v1.Pod { | ||||
| 			return pods | ||||
| 		} | ||||
|  | ||||
| 				podMaker := makePodWithPIDStats | ||||
| 				summaryStatsMaker := makePIDStats | ||||
| 				podsToMake := []podToMake{ | ||||
| 					{name: "pod1", priority: lowPriority, pidUsage: 500}, | ||||
| 					{name: "pod2", priority: defaultPriority, pidUsage: 500}, | ||||
| 				} | ||||
| 				pods := []*v1.Pod{} | ||||
| 				podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 				for _, podToMake := range podsToMake { | ||||
| 					pod, podStat := podMaker(podToMake.name, podToMake.priority, 2) | ||||
| 					pods = append(pods, pod) | ||||
| 					podStats[pod] = podStat | ||||
| 				} | ||||
| 				activePodsFunc := func() []*v1.Pod { | ||||
| 					return pods | ||||
| 				} | ||||
| 		fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 		podKiller := &mockPodKiller{} | ||||
| 		diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)} | ||||
| 		diskGC := &mockDiskGC{err: nil} | ||||
| 		nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
|  | ||||
| 				fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 				podKiller := &mockPodKiller{} | ||||
| 				diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)} | ||||
| 				diskGC := &mockDiskGC{err: nil} | ||||
| 				nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
|  | ||||
| 				config := Config{ | ||||
| 					PressureTransitionPeriod: time.Minute * 5, | ||||
| 					Thresholds: []evictionapi.Threshold{ | ||||
| 						{ | ||||
| 							Signal:   evictionapi.SignalPIDAvailable, | ||||
| 							Operator: evictionapi.OpLessThan, | ||||
| 							Value: evictionapi.ThresholdValue{ | ||||
| 								Quantity: quantityMustParse("1200"), | ||||
| 							}, | ||||
| 						}, | ||||
| 		config := Config{ | ||||
| 			PressureTransitionPeriod: time.Minute * 5, | ||||
| 			Thresholds: []evictionapi.Threshold{ | ||||
| 				{ | ||||
| 					Signal:   evictionapi.SignalPIDAvailable, | ||||
| 					Operator: evictionapi.OpLessThan, | ||||
| 					Value: evictionapi.ThresholdValue{ | ||||
| 						Quantity: quantityMustParse("1200"), | ||||
| 					}, | ||||
| 				} | ||||
| 				summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500", "1000", podStats)} | ||||
| 				manager := &managerImpl{ | ||||
| 					clock:                        fakeClock, | ||||
| 					killPodFunc:                  podKiller.killPodNow, | ||||
| 					imageGC:                      diskGC, | ||||
| 					containerGC:                  diskGC, | ||||
| 					config:                       config, | ||||
| 					recorder:                     &record.FakeRecorder{}, | ||||
| 					summaryProvider:              summaryProvider, | ||||
| 					nodeRef:                      nodeRef, | ||||
| 					nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 					thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 				} | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 		summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500", "1000", podStats)} | ||||
| 		manager := &managerImpl{ | ||||
| 			clock:                        fakeClock, | ||||
| 			killPodFunc:                  podKiller.killPodNow, | ||||
| 			imageGC:                      diskGC, | ||||
| 			containerGC:                  diskGC, | ||||
| 			config:                       config, | ||||
| 			recorder:                     &record.FakeRecorder{}, | ||||
| 			summaryProvider:              summaryProvider, | ||||
| 			nodeRef:                      nodeRef, | ||||
| 			nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 			thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 		} | ||||
|  | ||||
| 				// synchronize to detect the PID pressure | ||||
| 				_, err := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
| 		// synchronize to detect the PID pressure | ||||
| 		_, err := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
|  | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Manager expects no error but got %v", err) | ||||
| 				} | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Manager expects no error but got %v", err) | ||||
| 		} | ||||
|  | ||||
| 				// verify PID pressure is detected | ||||
| 				if !manager.IsUnderPIDPressure() { | ||||
| 					t.Fatalf("Manager should have detected PID pressure") | ||||
| 				} | ||||
| 		// verify PID pressure is detected | ||||
| 		if !manager.IsUnderPIDPressure() { | ||||
| 			t.Fatalf("Manager should have detected PID pressure") | ||||
| 		} | ||||
|  | ||||
| 				// verify a pod is selected for eviction | ||||
| 				if podKiller.pod == nil { | ||||
| 					t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 				} | ||||
| 		// verify a pod is selected for eviction | ||||
| 		if podKiller.pod == nil { | ||||
| 			t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 		} | ||||
|  | ||||
| 				wantPodStatus := tc.wantPodStatus.DeepCopy() | ||||
| 				if enablePodDisruptionConditions { | ||||
| 					wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 						Type:    "DisruptionTarget", | ||||
| 						Status:  "True", | ||||
| 						Reason:  "TerminationByKubelet", | ||||
| 						Message: "The node was low on resource: pids. Threshold quantity: 1200, available: 500. ", | ||||
| 					}) | ||||
| 				} | ||||
| 		wantPodStatus := tc.wantPodStatus.DeepCopy() | ||||
| 		wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 			Type:    "DisruptionTarget", | ||||
| 			Status:  "True", | ||||
| 			Reason:  "TerminationByKubelet", | ||||
| 			Message: "The node was low on resource: pids. Threshold quantity: 1200, available: 500. ", | ||||
| 		}) | ||||
|  | ||||
| 				// verify the pod status after applying the status update function | ||||
| 				podKiller.statusFn(&podKiller.pod.Status) | ||||
| 				if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 					t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 				} | ||||
| 			}) | ||||
| 		// verify the pod status after applying the status update function | ||||
| 		podKiller.statusFn(&podKiller.pod.Status) | ||||
| 		if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 			t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -570,97 +547,90 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	for name, tc := range testCases { | ||||
| 		for _, enablePodDisruptionConditions := range []bool{false, true} { | ||||
| 			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) { | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, tc.kubeletSeparateDiskFeature) | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions) | ||||
| 	for _, tc := range testCases { | ||||
| 		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, tc.kubeletSeparateDiskFeature) | ||||
|  | ||||
| 				podMaker := makePodWithDiskStats | ||||
| 				summaryStatsMaker := makeDiskStats | ||||
| 				podsToMake := tc.podToMakes | ||||
| 				wantPodStatus := v1.PodStatus{ | ||||
| 					Phase:   v1.PodFailed, | ||||
| 					Reason:  "Evicted", | ||||
| 					Message: tc.evictionMessage, | ||||
| 				} | ||||
| 				pods := []*v1.Pod{} | ||||
| 				podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 				for _, podToMake := range podsToMake { | ||||
| 					pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil) | ||||
| 					pods = append(pods, pod) | ||||
| 					podStats[pod] = podStat | ||||
| 				} | ||||
| 				activePodsFunc := func() []*v1.Pod { | ||||
| 					return pods | ||||
| 				} | ||||
| 		podMaker := makePodWithDiskStats | ||||
| 		summaryStatsMaker := makeDiskStats | ||||
| 		podsToMake := tc.podToMakes | ||||
| 		wantPodStatus := v1.PodStatus{ | ||||
| 			Phase:   v1.PodFailed, | ||||
| 			Reason:  "Evicted", | ||||
| 			Message: tc.evictionMessage, | ||||
| 		} | ||||
| 		pods := []*v1.Pod{} | ||||
| 		podStats := map[*v1.Pod]statsapi.PodStats{} | ||||
| 		for _, podToMake := range podsToMake { | ||||
| 			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil) | ||||
| 			pods = append(pods, pod) | ||||
| 			podStats[pod] = podStat | ||||
| 		} | ||||
| 		activePodsFunc := func() []*v1.Pod { | ||||
| 			return pods | ||||
| 		} | ||||
|  | ||||
| 				fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 				podKiller := &mockPodKiller{} | ||||
| 				diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs} | ||||
| 				diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly} | ||||
| 				nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
| 		fakeClock := testingclock.NewFakeClock(time.Now()) | ||||
| 		podKiller := &mockPodKiller{} | ||||
| 		diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs} | ||||
| 		diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly} | ||||
| 		nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} | ||||
|  | ||||
| 				config := Config{ | ||||
| 					PressureTransitionPeriod: time.Minute * 5, | ||||
| 					Thresholds:               []evictionapi.Threshold{tc.thresholdToMonitor}, | ||||
| 				} | ||||
| 				diskStat := diskStats{ | ||||
| 					rootFsAvailableBytes:      tc.nodeFsStats, | ||||
| 					imageFsAvailableBytes:     tc.imageFsStats, | ||||
| 					containerFsAvailableBytes: tc.containerFsStats, | ||||
| 					podStats:                  podStats, | ||||
| 				} | ||||
| 				summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStat)} | ||||
| 				manager := &managerImpl{ | ||||
| 					clock:                        fakeClock, | ||||
| 					killPodFunc:                  podKiller.killPodNow, | ||||
| 					imageGC:                      diskGC, | ||||
| 					containerGC:                  diskGC, | ||||
| 					config:                       config, | ||||
| 					recorder:                     &record.FakeRecorder{}, | ||||
| 					summaryProvider:              summaryProvider, | ||||
| 					nodeRef:                      nodeRef, | ||||
| 					nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 					thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 				} | ||||
| 		config := Config{ | ||||
| 			PressureTransitionPeriod: time.Minute * 5, | ||||
| 			Thresholds:               []evictionapi.Threshold{tc.thresholdToMonitor}, | ||||
| 		} | ||||
| 		diskStat := diskStats{ | ||||
| 			rootFsAvailableBytes:      tc.nodeFsStats, | ||||
| 			imageFsAvailableBytes:     tc.imageFsStats, | ||||
| 			containerFsAvailableBytes: tc.containerFsStats, | ||||
| 			podStats:                  podStats, | ||||
| 		} | ||||
| 		summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStat)} | ||||
| 		manager := &managerImpl{ | ||||
| 			clock:                        fakeClock, | ||||
| 			killPodFunc:                  podKiller.killPodNow, | ||||
| 			imageGC:                      diskGC, | ||||
| 			containerGC:                  diskGC, | ||||
| 			config:                       config, | ||||
| 			recorder:                     &record.FakeRecorder{}, | ||||
| 			summaryProvider:              summaryProvider, | ||||
| 			nodeRef:                      nodeRef, | ||||
| 			nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, | ||||
| 			thresholdsFirstObservedAt:    thresholdsObservedAt{}, | ||||
| 		} | ||||
|  | ||||
| 				// synchronize | ||||
| 				pods, synchErr := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
| 		// synchronize | ||||
| 		pods, synchErr := manager.synchronize(diskInfoProvider, activePodsFunc) | ||||
|  | ||||
| 				if synchErr == nil && tc.expectErr != "" { | ||||
| 					t.Fatalf("Manager should report error but did not") | ||||
| 				} else if tc.expectErr != "" && synchErr != nil { | ||||
| 					if diff := cmp.Diff(tc.expectErr, synchErr.Error()); diff != "" { | ||||
| 						t.Errorf("Unexpected error (-want,+got):\n%s", diff) | ||||
| 					} | ||||
| 				} else { | ||||
| 					// verify manager detected disk pressure | ||||
| 					if !manager.IsUnderDiskPressure() { | ||||
| 						t.Fatalf("Manager should report disk pressure") | ||||
| 					} | ||||
| 		if synchErr == nil && tc.expectErr != "" { | ||||
| 			t.Fatalf("Manager should report error but did not") | ||||
| 		} else if tc.expectErr != "" && synchErr != nil { | ||||
| 			if diff := cmp.Diff(tc.expectErr, synchErr.Error()); diff != "" { | ||||
| 				t.Errorf("Unexpected error (-want,+got):\n%s", diff) | ||||
| 			} | ||||
| 		} else { | ||||
| 			// verify manager detected disk pressure | ||||
| 			if !manager.IsUnderDiskPressure() { | ||||
| 				t.Fatalf("Manager should report disk pressure") | ||||
| 			} | ||||
|  | ||||
| 					// verify a pod is selected for eviction | ||||
| 					if podKiller.pod == nil { | ||||
| 						t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 					} | ||||
| 			// verify a pod is selected for eviction | ||||
| 			if podKiller.pod == nil { | ||||
| 				t.Fatalf("Manager should have selected a pod for eviction") | ||||
| 			} | ||||
|  | ||||
| 					if enablePodDisruptionConditions { | ||||
| 						wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 							Type:    "DisruptionTarget", | ||||
| 							Status:  "True", | ||||
| 							Reason:  "TerminationByKubelet", | ||||
| 							Message: tc.evictionMessage, | ||||
| 						}) | ||||
| 					} | ||||
|  | ||||
| 					// verify the pod status after applying the status update function | ||||
| 					podKiller.statusFn(&podKiller.pod.Status) | ||||
| 					if diff := cmp.Diff(wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 						t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 					} | ||||
| 				} | ||||
| 			wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{ | ||||
| 				Type:    "DisruptionTarget", | ||||
| 				Status:  "True", | ||||
| 				Reason:  "TerminationByKubelet", | ||||
| 				Message: tc.evictionMessage, | ||||
| 			}) | ||||
|  | ||||
| 			// verify the pod status after applying the status update function | ||||
| 			podKiller.statusFn(&podKiller.pod.Status) | ||||
| 			if diff := cmp.Diff(wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" { | ||||
| 				t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -3267,13 +3267,12 @@ func Test_generateAPIPodStatus(t *testing.T) { | ||||
| 		unreadyContainer                           []string | ||||
| 		previousStatus                             v1.PodStatus | ||||
| 		isPodTerminal                              bool | ||||
| 		enablePodDisruptionConditions              bool | ||||
| 		expected                                   v1.PodStatus | ||||
| 		expectedPodDisruptionCondition             v1.PodCondition | ||||
| 		expectedPodDisruptionCondition             *v1.PodCondition | ||||
| 		expectedPodReadyToStartContainersCondition v1.PodCondition | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name: "pod disruption condition is copied over and the phase is set to failed when deleted; PodDisruptionConditions enabled", | ||||
| 			name: "pod disruption condition is copied over and the phase is set to failed when deleted", | ||||
| 			pod: &v1.Pod{ | ||||
| 				Spec: desiredState, | ||||
| 				Status: v1.PodStatus{ | ||||
| @@ -3301,8 +3300,7 @@ func Test_generateAPIPodStatus(t *testing.T) { | ||||
| 					LastTransitionTime: normalized_now, | ||||
| 				}}, | ||||
| 			}, | ||||
| 			isPodTerminal:                 true, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			isPodTerminal: true, | ||||
| 			expected: v1.PodStatus{ | ||||
| 				Phase:    v1.PodFailed, | ||||
| 				HostIP:   "127.0.0.1", | ||||
| @@ -3319,7 +3317,7 @@ func Test_generateAPIPodStatus(t *testing.T) { | ||||
| 					ready(waitingWithLastTerminationUnknown("containerB", 0)), | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedPodDisruptionCondition: v1.PodCondition{ | ||||
| 			expectedPodDisruptionCondition: &v1.PodCondition{ | ||||
| 				Type:               v1.DisruptionTarget, | ||||
| 				Status:             v1.ConditionTrue, | ||||
| 				LastTransitionTime: normalized_now, | ||||
| @@ -3705,7 +3703,6 @@ func Test_generateAPIPodStatus(t *testing.T) { | ||||
| 	for _, test := range tests { | ||||
| 		for _, enablePodReadyToStartContainersCondition := range []bool{false, true} { | ||||
| 			t.Run(test.name, func(t *testing.T) { | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition) | ||||
| 				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) | ||||
| 				defer testKubelet.Cleanup() | ||||
| @@ -3719,8 +3716,8 @@ func Test_generateAPIPodStatus(t *testing.T) { | ||||
| 				if enablePodReadyToStartContainersCondition { | ||||
| 					expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...) | ||||
| 				} | ||||
| 				if test.enablePodDisruptionConditions { | ||||
| 					expected.Conditions = append([]v1.PodCondition{test.expectedPodDisruptionCondition}, expected.Conditions...) | ||||
| 				if test.expectedPodDisruptionCondition != nil { | ||||
| 					expected.Conditions = append([]v1.PodCondition{*test.expectedPodDisruptionCondition}, expected.Conditions...) | ||||
| 				} | ||||
| 				if !apiequality.Semantic.DeepEqual(*expected, actual) { | ||||
| 					t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual)) | ||||
|   | ||||
| @@ -125,14 +125,13 @@ func TestManager(t *testing.T) { | ||||
| 		shutdownGracePeriodCriticalPods  time.Duration | ||||
| 		systemInhibitDelay               time.Duration | ||||
| 		overrideSystemInhibitDelay       time.Duration | ||||
| 		enablePodDisruptionConditions    bool | ||||
| 		expectedDidOverrideInhibitDelay  bool | ||||
| 		expectedPodToGracePeriodOverride map[string]int64 | ||||
| 		expectedError                    error | ||||
| 		expectedPodStatuses              map[string]v1.PodStatus | ||||
| 	}{ | ||||
| 		{ | ||||
| 			desc: "verify pod status; PodDisruptionConditions enabled", | ||||
| 			desc: "verify pod status", | ||||
| 			activePods: []*v1.Pod{ | ||||
| 				{ | ||||
| 					ObjectMeta: metav1.ObjectMeta{Name: "running-pod"}, | ||||
| @@ -160,7 +159,6 @@ func TestManager(t *testing.T) { | ||||
| 			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second), | ||||
| 			systemInhibitDelay:               time.Duration(40 * time.Second), | ||||
| 			overrideSystemInhibitDelay:       time.Duration(40 * time.Second), | ||||
| 			enablePodDisruptionConditions:    true, | ||||
| 			expectedDidOverrideInhibitDelay:  false, | ||||
| 			expectedPodToGracePeriodOverride: map[string]int64{"running-pod": 20, "failed-pod": 20, "succeeded-pod": 20}, | ||||
| 			expectedPodStatuses: map[string]v1.PodStatus{ | ||||
| @@ -212,7 +210,6 @@ func TestManager(t *testing.T) { | ||||
| 			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second), | ||||
| 			systemInhibitDelay:               time.Duration(40 * time.Second), | ||||
| 			overrideSystemInhibitDelay:       time.Duration(40 * time.Second), | ||||
| 			enablePodDisruptionConditions:    false, | ||||
| 			expectedDidOverrideInhibitDelay:  false, | ||||
| 			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10}, | ||||
| 			expectedPodStatuses: map[string]v1.PodStatus{ | ||||
| @@ -220,11 +217,27 @@ func TestManager(t *testing.T) { | ||||
| 					Phase:   v1.PodFailed, | ||||
| 					Message: "Pod was terminated in response to imminent node shutdown.", | ||||
| 					Reason:  "Terminated", | ||||
| 					Conditions: []v1.PodCondition{ | ||||
| 						{ | ||||
| 							Type:    v1.DisruptionTarget, | ||||
| 							Status:  v1.ConditionTrue, | ||||
| 							Reason:  "TerminationByKubelet", | ||||
| 							Message: "Pod was terminated in response to imminent node shutdown.", | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 				"critical-pod-nil-grace-period": { | ||||
| 					Phase:   v1.PodFailed, | ||||
| 					Message: "Pod was terminated in response to imminent node shutdown.", | ||||
| 					Reason:  "Terminated", | ||||
| 					Conditions: []v1.PodCondition{ | ||||
| 						{ | ||||
| 							Type:    v1.DisruptionTarget, | ||||
| 							Status:  v1.ConditionTrue, | ||||
| 							Reason:  "TerminationByKubelet", | ||||
| 							Message: "Pod was terminated in response to imminent node shutdown.", | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| @@ -331,7 +344,6 @@ func TestManager(t *testing.T) { | ||||
| 			systemDbus = func() (dbusInhibiter, error) { | ||||
| 				return fakeDbus, nil | ||||
| 			} | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodDisruptionConditions, tc.enablePodDisruptionConditions) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, true) | ||||
|  | ||||
| 			proberManager := probetest.FakeManager{} | ||||
|   | ||||
| @@ -35,14 +35,11 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	utilfeature "k8s.io/apiserver/pkg/util/feature" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	core "k8s.io/client-go/testing" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	api "k8s.io/kubernetes/pkg/apis/core" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| @@ -1065,9 +1062,8 @@ func TestTerminatePod_DefaultUnknownStatus(t *testing.T) { | ||||
|  | ||||
| func TestTerminatePod_EnsurePodPhaseIsTerminal(t *testing.T) { | ||||
| 	testCases := map[string]struct { | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		status                        v1.PodStatus | ||||
| 		wantStatus                    v1.PodStatus | ||||
| 		status     v1.PodStatus | ||||
| 		wantStatus v1.PodStatus | ||||
| 	}{ | ||||
| 		"Pending pod": { | ||||
| 			status: v1.PodStatus{ | ||||
| @@ -1542,24 +1538,14 @@ func deleteAction() core.DeleteAction { | ||||
|  | ||||
| func TestMergePodStatus(t *testing.T) { | ||||
| 	useCases := []struct { | ||||
| 		desc                          string | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		hasRunningContainers          bool | ||||
| 		oldPodStatus                  func(input v1.PodStatus) v1.PodStatus | ||||
| 		newPodStatus                  func(input v1.PodStatus) v1.PodStatus | ||||
| 		expectPodStatus               v1.PodStatus | ||||
| 		desc                 string | ||||
| 		hasRunningContainers bool | ||||
| 		oldPodStatus         func(input v1.PodStatus) v1.PodStatus | ||||
| 		newPodStatus         func(input v1.PodStatus) v1.PodStatus | ||||
| 		expectPodStatus      v1.PodStatus | ||||
| 	}{ | ||||
| 		{ | ||||
| 			"no change", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { return input }, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { return input }, | ||||
| 			getPodStatus(), | ||||
| 		}, | ||||
| 		{ | ||||
| 			"add DisruptionTarget condition when transitioning into failed phase; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"add DisruptionTarget condition when transitioning into failed phase", | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { return input }, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| @@ -1598,8 +1584,7 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"don't add DisruptionTarget condition when transitioning into failed phase, but there are might still be running containers; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"don't add DisruptionTarget condition when transitioning into failed phase, but there might still be running containers", | ||||
| 			true, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { return input }, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| @@ -1627,8 +1612,7 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"preserve DisruptionTarget condition; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"preserve DisruptionTarget condition", | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| @@ -1662,43 +1646,7 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"preserve DisruptionTarget condition; PodDisruptionConditions disabled", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| 					Type:   v1.DisruptionTarget, | ||||
| 					Status: v1.ConditionTrue, | ||||
| 					Reason: "TerminationByKubelet", | ||||
| 				}) | ||||
| 				return input | ||||
| 			}, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				return input | ||||
| 			}, | ||||
| 			v1.PodStatus{ | ||||
| 				Phase: v1.PodRunning, | ||||
| 				Conditions: []v1.PodCondition{ | ||||
| 					{ | ||||
| 						Type:   v1.PodReady, | ||||
| 						Status: v1.ConditionTrue, | ||||
| 					}, | ||||
| 					{ | ||||
| 						Type:   v1.PodScheduled, | ||||
| 						Status: v1.ConditionTrue, | ||||
| 					}, | ||||
| 					{ | ||||
| 						Type:   v1.DisruptionTarget, | ||||
| 						Status: v1.ConditionTrue, | ||||
| 						Reason: "TerminationByKubelet", | ||||
| 					}, | ||||
| 				}, | ||||
| 				Message: "Message", | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"override DisruptionTarget condition; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"override DisruptionTarget condition", | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| @@ -1744,8 +1692,7 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"don't override DisruptionTarget condition when remaining in running phase", | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| @@ -1784,8 +1731,7 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled", | ||||
| 			true, | ||||
| 			"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers", | ||||
| 			true, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| @@ -1827,7 +1773,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		{ | ||||
| 			"readiness changes", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { return input }, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions[0].Status = v1.ConditionFalse | ||||
| @@ -1851,7 +1796,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		{ | ||||
| 			"additional pod condition", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| 					Type:   v1.PodConditionType("example.com/feature"), | ||||
| @@ -1882,7 +1826,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		{ | ||||
| 			"additional pod condition and readiness changes", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| 					Type:   v1.PodConditionType("example.com/feature"), | ||||
| @@ -1916,7 +1859,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		{ | ||||
| 			"additional pod condition changes", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Conditions = append(input.Conditions, v1.PodCondition{ | ||||
| 					Type:   v1.PodConditionType("example.com/feature"), | ||||
| @@ -1953,7 +1895,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		{ | ||||
| 			"phase is transitioning to failed and no containers running", | ||||
| 			false, | ||||
| 			false, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Phase = v1.PodRunning | ||||
| 				input.Reason = "Unknown" | ||||
| @@ -1990,7 +1931,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
| 		}, | ||||
| 		{ | ||||
| 			"phase is transitioning to failed and containers running", | ||||
| 			false, | ||||
| 			true, | ||||
| 			func(input v1.PodStatus) v1.PodStatus { | ||||
| 				input.Phase = v1.PodRunning | ||||
| @@ -2024,7 +1964,6 @@ func TestMergePodStatus(t *testing.T) { | ||||
|  | ||||
| 	for _, tc := range useCases { | ||||
| 		t.Run(tc.desc, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions) | ||||
| 			output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers) | ||||
| 			if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) { | ||||
| 				t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output)) | ||||
|   | ||||
| @@ -27,7 +27,6 @@ import ( | ||||
|  | ||||
| func TestPodConditionByKubelet(t *testing.T) { | ||||
| 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, true) | ||||
| 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true) | ||||
|  | ||||
| 	trueCases := []v1.PodConditionType{ | ||||
| 		v1.PodScheduled, | ||||
| @@ -56,8 +55,6 @@ func TestPodConditionByKubelet(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestPodConditionSharedByKubelet(t *testing.T) { | ||||
| 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true) | ||||
|  | ||||
| 	trueCases := []v1.PodConditionType{ | ||||
| 		v1.DisruptionTarget, | ||||
| 	} | ||||
|   | ||||
| @@ -802,46 +802,42 @@ func TestAddConditionAndDelete(t *testing.T) { | ||||
| 	evictionRest := newEvictionStorage(storage.Store, client.PolicyV1()) | ||||
|  | ||||
| 	for _, tc := range cases { | ||||
| 		for _, conditionsEnabled := range []bool{true, false} { | ||||
| 			name := fmt.Sprintf("%s_conditions=%v", tc.name, conditionsEnabled) | ||||
| 			t.Run(name, func(t *testing.T) { | ||||
| 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, conditionsEnabled) | ||||
| 				var deleteOptions *metav1.DeleteOptions | ||||
| 				if tc.initialPod { | ||||
| 					newPod := validNewPod() | ||||
| 					createdObj, err := storage.Create(testContext, newPod, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) | ||||
| 					if err != nil { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			var deleteOptions *metav1.DeleteOptions | ||||
| 			if tc.initialPod { | ||||
| 				newPod := validNewPod() | ||||
| 				createdObj, err := storage.Create(testContext, newPod, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) | ||||
| 				if err != nil { | ||||
| 					t.Fatal(err) | ||||
| 				} | ||||
| 				t.Cleanup(func() { | ||||
| 					zero := int64(0) | ||||
| 					if _, _, err := storage.Delete(testContext, newPod.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{GracePeriodSeconds: &zero}); err != nil && !apierrors.IsNotFound(err) { | ||||
| 						t.Fatal(err) | ||||
| 					} | ||||
| 					t.Cleanup(func() { | ||||
| 						zero := int64(0) | ||||
| 						if _, _, err := storage.Delete(testContext, newPod.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{GracePeriodSeconds: &zero}); err != nil && !apierrors.IsNotFound(err) { | ||||
| 							t.Fatal(err) | ||||
| 						} | ||||
| 					}) | ||||
| 					deleteOptions = tc.makeDeleteOptions(createdObj.(*api.Pod)) | ||||
| 				} else { | ||||
| 					deleteOptions = tc.makeDeleteOptions(nil) | ||||
| 				} | ||||
| 				if deleteOptions == nil { | ||||
| 					deleteOptions = &metav1.DeleteOptions{} | ||||
| 				} | ||||
| 				}) | ||||
| 				deleteOptions = tc.makeDeleteOptions(createdObj.(*api.Pod)) | ||||
| 			} else { | ||||
| 				deleteOptions = tc.makeDeleteOptions(nil) | ||||
| 			} | ||||
| 			if deleteOptions == nil { | ||||
| 				deleteOptions = &metav1.DeleteOptions{} | ||||
| 			} | ||||
|  | ||||
| 				err := addConditionAndDeletePod(evictionRest, testContext, "foo", rest.ValidateAllObjectFunc, deleteOptions) | ||||
| 				if err == nil { | ||||
| 					if tc.expectErr != "" { | ||||
| 						t.Fatalf("expected err containing %q, got none", tc.expectErr) | ||||
| 					} | ||||
| 					return | ||||
| 			err := addConditionAndDeletePod(evictionRest, testContext, "foo", rest.ValidateAllObjectFunc, deleteOptions) | ||||
| 			if err == nil { | ||||
| 				if tc.expectErr != "" { | ||||
| 					t.Fatalf("expected err containing %q, got none", tc.expectErr) | ||||
| 				} | ||||
| 				if tc.expectErr == "" { | ||||
| 					t.Fatalf("unexpected err: %v", err) | ||||
| 				} | ||||
| 				if !strings.Contains(err.Error(), tc.expectErr) { | ||||
| 					t.Fatalf("expected err containing %q, got %v", tc.expectErr, err) | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 				return | ||||
| 			} | ||||
| 			if tc.expectErr == "" { | ||||
| 				t.Fatalf("unexpected err: %v", err) | ||||
| 			} | ||||
| 			if !strings.Contains(err.Error(), tc.expectErr) { | ||||
| 				t.Fatalf("expected err containing %q, got %v", tc.expectErr, err) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -1437,7 +1437,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) { | ||||
| 			pods:                []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()}, | ||||
| 			nodes:               []string{"node1"}, | ||||
| 			nominatedNodeStatus: nil, | ||||
| 			expected:            false, | ||||
| 			expected:            true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                "Pod without nominated node", | ||||
| @@ -1456,8 +1456,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) { | ||||
| 			expected:            false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name: "victim Pods terminating, feature PodDisruptionConditions is enabled", | ||||
| 			fts:  feature.Features{EnablePodDisruptionConditions: true}, | ||||
| 			name: "victim Pods terminating", | ||||
| 			pod:  st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(), | ||||
| 			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating(). | ||||
| 				Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()}, | ||||
| @@ -1465,34 +1464,17 @@ func TestPodEligibleToPreemptOthers(t *testing.T) { | ||||
| 			expected: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "non-victim Pods terminating, feature PodDisruptionConditions is enabled", | ||||
| 			fts:      feature.Features{EnablePodDisruptionConditions: true}, | ||||
| 			name:     "non-victim Pods terminating", | ||||
| 			pod:      st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(), | ||||
| 			pods:     []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()}, | ||||
| 			nodes:    []string{"node1"}, | ||||
| 			expected: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name: "victim Pods terminating, feature PodDisruptionConditions is disabled", | ||||
| 			fts:  feature.Features{EnablePodDisruptionConditions: false}, | ||||
| 			pod:  st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(), | ||||
| 			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating(). | ||||
| 				Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()}, | ||||
| 			nodes:    []string{"node1"}, | ||||
| 			expected: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "non-victim Pods terminating, feature PodDisruptionConditions is disabled", | ||||
| 			fts:      feature.Features{EnablePodDisruptionConditions: false}, | ||||
| 			pod:      st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(), | ||||
| 			pods:     []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()}, | ||||
| 			nodes:    []string{"node1"}, | ||||
| 			expected: false, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, test := range tests { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			test.fts.EnablePodDisruptionConditions = true | ||||
| 			logger, ctx := ktesting.NewTestContext(t) | ||||
| 			ctx, cancel := context.WithCancel(ctx) | ||||
| 			defer cancel() | ||||
|   | ||||
| @@ -31,7 +31,6 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/fields" | ||||
| 	kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/eviction" | ||||
| 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" | ||||
| @@ -513,16 +512,13 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo | ||||
| 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs) | ||||
| 	}) | ||||
|  | ||||
| 	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled", nodefeature.PodDisruptionConditions, func() { | ||||
| 	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() { | ||||
| 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { | ||||
| 			pidsConsumed := int64(10000) | ||||
| 			summary := eventuallyGetSummary(ctx) | ||||
| 			availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses) | ||||
| 			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)} | ||||
| 			initialConfig.EvictionMinimumReclaim = map[string]string{} | ||||
| 			initialConfig.FeatureGates = map[string]bool{ | ||||
| 				string(features.PodDisruptionConditions): true, | ||||
| 			} | ||||
| 		}) | ||||
| 		disruptionTarget := v1.DisruptionTarget | ||||
| 		specs := []podEvictSpec{ | ||||
|   | ||||
| @@ -83,7 +83,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature. | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	f.Context("graceful node shutdown when PodDisruptionConditions are enabled", nodefeature.PodDisruptionConditions, func() { | ||||
| 	f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() { | ||||
|  | ||||
| 		const ( | ||||
| 			pollInterval            = 1 * time.Second | ||||
| @@ -95,7 +95,6 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature. | ||||
| 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { | ||||
| 			initialConfig.FeatureGates = map[string]bool{ | ||||
| 				string(features.GracefulNodeShutdown):                   true, | ||||
| 				string(features.PodDisruptionConditions):                true, | ||||
| 				string(features.GracefulNodeShutdownBasedOnPodPriority): false, | ||||
| 			} | ||||
| 			initialConfig.ShutdownGracePeriod = metav1.Duration{Duration: nodeShutdownGracePeriod} | ||||
|   | ||||
| @@ -346,36 +346,22 @@ func TestEvictionVersions(t *testing.T) { | ||||
| // TestEvictionWithFinalizers tests eviction with the use of finalizers | ||||
| func TestEvictionWithFinalizers(t *testing.T) { | ||||
| 	cases := map[string]struct { | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		phase                         v1.PodPhase | ||||
| 		dryRun                        bool | ||||
| 		wantDisruptionTargetCond      bool | ||||
| 		phase                    v1.PodPhase | ||||
| 		dryRun                   bool | ||||
| 		wantDisruptionTargetCond bool | ||||
| 	}{ | ||||
| 		"terminal pod with PodDisruptionConditions enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodSucceeded, | ||||
| 			wantDisruptionTargetCond:      true, | ||||
| 		"terminal pod": { | ||||
| 			phase:                    v1.PodSucceeded, | ||||
| 			wantDisruptionTargetCond: true, | ||||
| 		}, | ||||
| 		"terminal pod with PodDisruptionConditions disabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			phase:                         v1.PodSucceeded, | ||||
| 			wantDisruptionTargetCond:      false, | ||||
| 		"running pod": { | ||||
| 			phase:                    v1.PodRunning, | ||||
| 			wantDisruptionTargetCond: true, | ||||
| 		}, | ||||
| 		"running pod with PodDisruptionConditions enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodRunning, | ||||
| 			wantDisruptionTargetCond:      true, | ||||
| 		}, | ||||
| 		"running pod with PodDisruptionConditions disabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			phase:                         v1.PodRunning, | ||||
| 			wantDisruptionTargetCond:      false, | ||||
| 		}, | ||||
| 		"running pod with PodDisruptionConditions enabled should not update conditions in dry-run mode": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodRunning, | ||||
| 			dryRun:                        true, | ||||
| 			wantDisruptionTargetCond:      false, | ||||
| 		"running pod should not update conditions in dry-run mode": { | ||||
| 			phase:                    v1.PodRunning, | ||||
| 			dryRun:                   true, | ||||
| 			wantDisruptionTargetCond: false, | ||||
| 		}, | ||||
| 	} | ||||
| 	for name, tc := range cases { | ||||
| @@ -386,7 +372,6 @@ func TestEvictionWithFinalizers(t *testing.T) { | ||||
|  | ||||
| 			ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-finalizers", t) | ||||
| 			defer framework.DeleteNamespaceOrDie(clientSet, ns, t) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions) | ||||
| 			defer tCtx.Cancel("test has completed") | ||||
|  | ||||
| 			informers.Start(tCtx.Done()) | ||||
|   | ||||
| @@ -51,31 +51,21 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { | ||||
| 	nodeIndex := 1 // the exact node doesn't matter, pick one | ||||
|  | ||||
| 	tests := map[string]struct { | ||||
| 		enablePodDisruptionConditions          bool | ||||
| 		enableSeparateTaintEvictionController  bool | ||||
| 		startStandaloneTaintEvictionController bool | ||||
| 		wantPodEvicted                         bool | ||||
| 	}{ | ||||
| 		"Test eviction for NoExecute taint added by user; pod condition added when PodDisruptionConditions enabled; separate taint eviction controller disabled": { | ||||
| 			enablePodDisruptionConditions:          true, | ||||
| 			enableSeparateTaintEvictionController:  false, | ||||
| 			startStandaloneTaintEvictionController: false, | ||||
| 			wantPodEvicted:                         true, | ||||
| 		}, | ||||
| 		"Test eviction for NoExecute taint added by user; no pod condition added when PodDisruptionConditions disabled; separate taint eviction controller disabled": { | ||||
| 			enablePodDisruptionConditions:          false, | ||||
| 		"Test eviction for NoExecute taint added by user; pod condition added; separate taint eviction controller disabled": { | ||||
| 			enableSeparateTaintEvictionController:  false, | ||||
| 			startStandaloneTaintEvictionController: false, | ||||
| 			wantPodEvicted:                         true, | ||||
| 		}, | ||||
| 		"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled but not started": { | ||||
| 			enablePodDisruptionConditions:          false, | ||||
| 			enableSeparateTaintEvictionController:  true, | ||||
| 			startStandaloneTaintEvictionController: false, | ||||
| 			wantPodEvicted:                         false, | ||||
| 		}, | ||||
| 		"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled and started": { | ||||
| 			enablePodDisruptionConditions:          false, | ||||
| 			enableSeparateTaintEvictionController:  true, | ||||
| 			startStandaloneTaintEvictionController: true, | ||||
| 			wantPodEvicted:                         true, | ||||
| @@ -124,7 +114,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { | ||||
| 				}, | ||||
| 			} | ||||
|  | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SeparateTaintEvictionController, test.enableSeparateTaintEvictionController) | ||||
| 			testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil) | ||||
| 			cs := testCtx.ClientSet | ||||
| @@ -202,9 +191,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { | ||||
| 				t.Fatalf("Test Failed: error: %q, while getting updated pod", err) | ||||
| 			} | ||||
| 			_, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget) | ||||
| 			if test.enablePodDisruptionConditions && cond == nil { | ||||
| 			if test.wantPodEvicted && cond == nil { | ||||
| 				t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) | ||||
| 			} else if !test.enablePodDisruptionConditions && cond != nil { | ||||
| 			} else if !test.wantPodEvicted && cond != nil { | ||||
| 				t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) | ||||
| 			} | ||||
| 		}) | ||||
|   | ||||
| @@ -40,16 +40,14 @@ import ( | ||||
| // TestPodGcOrphanedPodsWithFinalizer tests deletion of orphaned pods | ||||
| func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { | ||||
| 	tests := map[string]struct { | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		enableJobPodReplacementPolicy bool | ||||
| 		phase                         v1.PodPhase | ||||
| 		wantPhase                     v1.PodPhase | ||||
| 		wantDisruptionTarget          *v1.PodCondition | ||||
| 	}{ | ||||
| 		"PodDisruptionConditions enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodPending, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| 		"pending pod": { | ||||
| 			phase:     v1.PodPending, | ||||
| 			wantPhase: v1.PodFailed, | ||||
| 			wantDisruptionTarget: &v1.PodCondition{ | ||||
| 				Type:    v1.DisruptionTarget, | ||||
| 				Status:  v1.ConditionTrue, | ||||
| @@ -57,8 +55,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { | ||||
| 				Message: "PodGC: node no longer exists", | ||||
| 			}, | ||||
| 		}, | ||||
| 		"PodDisruptionConditions and PodReplacementPolicy enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		"pending pod; PodReplacementPolicy enabled": { | ||||
| 			enableJobPodReplacementPolicy: true, | ||||
| 			phase:                         v1.PodPending, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| @@ -69,32 +66,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { | ||||
| 				Message: "PodGC: node no longer exists", | ||||
| 			}, | ||||
| 		}, | ||||
| 		"Only PodReplacementPolicy enabled; no PodDisruptionCondition": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			enableJobPodReplacementPolicy: true, | ||||
| 			phase:                         v1.PodPending, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| 		"succeeded pod": { | ||||
| 			phase:     v1.PodSucceeded, | ||||
| 			wantPhase: v1.PodSucceeded, | ||||
| 		}, | ||||
| 		"PodDisruptionConditions disabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			phase:                         v1.PodPending, | ||||
| 			wantPhase:                     v1.PodPending, | ||||
| 		}, | ||||
| 		"PodDisruptionConditions enabled; succeeded pod": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodSucceeded, | ||||
| 			wantPhase:                     v1.PodSucceeded, | ||||
| 		}, | ||||
| 		"PodDisruptionConditions enabled; failed pod": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			phase:                         v1.PodFailed, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| 		"failed pod": { | ||||
| 			phase:     v1.PodFailed, | ||||
| 			wantPhase: v1.PodFailed, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, test := range tests { | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy) | ||||
| 			testCtx := setup(t, "podgc-orphaned") | ||||
| 			cs := testCtx.ClientSet | ||||
| @@ -170,31 +153,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { | ||||
| // TestTerminatingOnOutOfServiceNode tests deletion pods terminating on out-of-service nodes | ||||
| func TestTerminatingOnOutOfServiceNode(t *testing.T) { | ||||
| 	tests := map[string]struct { | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		enableJobPodReplacementPolicy bool | ||||
| 		withFinalizer                 bool | ||||
| 		wantPhase                     v1.PodPhase | ||||
| 	}{ | ||||
| 		"pod has phase changed to Failed when PodDisruptionConditions enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			withFinalizer:                 true, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| 		"pod has phase changed to Failed": { | ||||
| 			withFinalizer: true, | ||||
| 			wantPhase:     v1.PodFailed, | ||||
| 		}, | ||||
| 		"pod has phase unchanged when PodDisruptionConditions disabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			withFinalizer:                 true, | ||||
| 			wantPhase:                     v1.PodPending, | ||||
| 		"pod is getting deleted when no finalizer": { | ||||
| 			withFinalizer: false, | ||||
| 		}, | ||||
| 		"pod is getting deleted when no finalizer and PodDisruptionConditions enabled": { | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 			withFinalizer:                 false, | ||||
| 		}, | ||||
| 		"pod is getting deleted when no finalizer and PodDisruptionConditions disabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 			withFinalizer:                 false, | ||||
| 		}, | ||||
| 		"pod has phase changed when PodDisruptionConditions disabled, but JobPodReplacementPolicy enabled": { | ||||
| 			enablePodDisruptionConditions: false, | ||||
| 		"pod has phase changed when JobPodReplacementPolicy enabled": { | ||||
| 			enableJobPodReplacementPolicy: true, | ||||
| 			withFinalizer:                 true, | ||||
| 			wantPhase:                     v1.PodFailed, | ||||
| @@ -203,7 +173,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) { | ||||
|  | ||||
| 	for name, test := range tests { | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true) | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy) | ||||
| 			testCtx := setup(t, "podgc-out-of-service") | ||||
| @@ -385,7 +354,6 @@ func TestPodGcForPodsWithDuplicatedFieldKeys(t *testing.T) { | ||||
|  | ||||
| 	for name, test := range tests { | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true) | ||||
| 			testCtx := setup(t, "podgc-orphaned") | ||||
| 			cs := testCtx.ClientSet | ||||
|  | ||||
|   | ||||
| @@ -33,17 +33,14 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/types" | ||||
| 	"k8s.io/apimachinery/pkg/util/intstr" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	"k8s.io/apiserver/pkg/util/feature" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	restclient "k8s.io/client-go/rest" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	"k8s.io/component-helpers/storage/volume" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	configv1 "k8s.io/kube-scheduler/config/v1" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	"k8s.io/kubernetes/pkg/apis/scheduling" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/pkg/scheduler" | ||||
| 	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing" | ||||
| 	"k8s.io/kubernetes/pkg/scheduler/framework" | ||||
| @@ -200,41 +197,14 @@ func TestPreemption(t *testing.T) { | ||||
|  | ||||
| 	maxTokens := 1000 | ||||
| 	tests := []struct { | ||||
| 		name                          string | ||||
| 		existingPods                  []*v1.Pod | ||||
| 		pod                           *v1.Pod | ||||
| 		initTokens                    int | ||||
| 		enablePreFilter               bool | ||||
| 		unresolvable                  bool | ||||
| 		preemptedPodIndexes           map[int]struct{} | ||||
| 		enablePodDisruptionConditions bool | ||||
| 		name                string | ||||
| 		existingPods        []*v1.Pod | ||||
| 		pod                 *v1.Pod | ||||
| 		initTokens          int | ||||
| 		enablePreFilter     bool | ||||
| 		unresolvable        bool | ||||
| 		preemptedPodIndexes map[int]struct{} | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:       "basic pod preemption with PodDisruptionConditions enabled", | ||||
| 			initTokens: maxTokens, | ||||
| 			existingPods: []*v1.Pod{ | ||||
| 				initPausePod(&testutils.PausePodConfig{ | ||||
| 					Name:      "victim-pod", | ||||
| 					Namespace: testCtx.NS.Name, | ||||
| 					Priority:  &lowPriority, | ||||
| 					Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ | ||||
| 						v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI), | ||||
| 						v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)}, | ||||
| 					}, | ||||
| 				}), | ||||
| 			}, | ||||
| 			pod: initPausePod(&testutils.PausePodConfig{ | ||||
| 				Name:      "preemptor-pod", | ||||
| 				Namespace: testCtx.NS.Name, | ||||
| 				Priority:  &highPriority, | ||||
| 				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ | ||||
| 					v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI), | ||||
| 					v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)}, | ||||
| 				}, | ||||
| 			}), | ||||
| 			preemptedPodIndexes:           map[int]struct{}{0: {}}, | ||||
| 			enablePodDisruptionConditions: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:       "basic pod preemption", | ||||
| 			initTokens: maxTokens, | ||||
| @@ -484,7 +454,6 @@ func TestPreemption(t *testing.T) { | ||||
|  | ||||
| 	for _, test := range tests { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions) | ||||
| 			filter.Tokens = test.initTokens | ||||
| 			filter.EnablePreFilter = test.enablePreFilter | ||||
| 			filter.Unresolvable = test.unresolvable | ||||
| @@ -513,10 +482,8 @@ func TestPreemption(t *testing.T) { | ||||
| 						t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name) | ||||
| 					} | ||||
| 					_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget) | ||||
| 					if test.enablePodDisruptionConditions && cond == nil { | ||||
| 					if cond == nil { | ||||
| 						t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget) | ||||
| 					} else if test.enablePodDisruptionConditions == false && cond != nil { | ||||
| 						t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget) | ||||
| 					} | ||||
| 				} else { | ||||
| 					if p.DeletionTimestamp != nil { | ||||
|   | ||||