Add DisruptionTarget condition when preempting for critical pod
@@ -21,10 +21,13 @@ import (
 	"math"
 
 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/v2"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/api/v1/resource"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -103,6 +106,14 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod,
 			status.Phase = v1.PodFailed
 			status.Reason = events.PreemptContainer
 			status.Message = message
+			if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
+				podutil.UpdatePodCondition(status, &v1.PodCondition{
+					Type:    v1.DisruptionTarget,
+					Status:  v1.ConditionTrue,
+					Reason:  v1.PodReasonTerminationByKubelet,
+					Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
+				})
+			}
 		})
 		if err != nil {
 			klog.ErrorS(err, "Failed to evict pod", "pod", klog.KObj(pod))
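Since the hunk above only shows the body of the status mutation, here is a minimal, self-contained sketch of the same logic for reference. The gate check and the condition fields are copied from the diff; the wrapper name markPreempted and the main function are illustrative only, since in the kubelet this code runs inside the closure that evictPodsToFreeRequests passes to the kill-pod callback.

```go
// Sketch only: the status mutation added by this commit, lifted out of the
// kubelet so it can be read in isolation. markPreempted and main are
// illustrative; they are not part of the commit.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/events"
)

// markPreempted fails the pod and, when the PodDisruptionConditions feature
// gate is enabled, records a DisruptionTarget condition explaining that the
// kubelet preempted it to make room for a critical pod.
func markPreempted(status *v1.PodStatus, message string) {
	status.Phase = v1.PodFailed
	status.Reason = events.PreemptContainer
	status.Message = message
	if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
		podutil.UpdatePodCondition(status, &v1.PodCondition{
			Type:    v1.DisruptionTarget,
			Status:  v1.ConditionTrue,
			Reason:  v1.PodReasonTerminationByKubelet,
			Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
		})
	}
}

func main() {
	status := &v1.PodStatus{Phase: v1.PodRunning}
	markPreempted(status, "Preempted in order to admit a critical pod")
	// Depending on the feature gate's default in the running version, the
	// DisruptionTarget condition may or may not have been appended.
	fmt.Printf("phase=%s conditions=%+v\n", status.Phase, status.Conditions)
}
```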
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -87,6 +88,49 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
 				}
 			}
 		})
+
+		ginkgo.It("should add DisruptionTarget condition to the preempted pod [NodeFeature:PodDisruptionConditions]", func(ctx context.Context) {
+			// because Priority admission is enabled, the Pod is rejected if its priority class is not found.
+			node := getNodeName(ctx, f)
+			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
+				},
+				Limits: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
+				},
+			}, node)
+
+			criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
+				// request the entire resource capacity of the node, so that
+				// admitting this pod requires the other pod to be preempted
+				Requests: getNodeCPUAndMemoryCapacity(ctx, f),
+			}, node)
+			criticalPod.Namespace = kubeapi.NamespaceSystem
+
+			ginkgo.By(fmt.Sprintf("create the non-critical pod %q", klog.KObj(nonCriticalGuaranteed)))
+			e2epod.NewPodClient(f).CreateSync(ctx, nonCriticalGuaranteed)
+
+			ginkgo.By(fmt.Sprintf("create the critical pod %q", klog.KObj(criticalPod)))
+			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).Create(ctx, criticalPod)
+
+			ginkgo.By(fmt.Sprintf("wait for the critical pod %q to be ready", klog.KObj(criticalPod)))
+			err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, criticalPod.Name, kubeapi.NamespaceSystem)
+			framework.ExpectNoError(err, "Failed to wait for the pod %q to be running", klog.KObj(criticalPod))
+
+			// Check that non-critical pods other than the besteffort have been evicted
+			updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
+			framework.ExpectNoError(err)
+			for _, p := range updatedPodList.Items {
+				ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
+				framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
+				if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
+					framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
+				}
+			}
+		})
 		ginkgo.AfterEach(func(ctx context.Context) {
 			// Delete Pods
 			e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
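The loop in the test above relies on e2epod.FindPodConditionByType from the e2e framework. Outside that framework the same check only needs the core/v1 types; the sketch below shows one way to do it. The helper name hasDisruptionTarget is illustrative and is not something this commit adds.

```go
// Sketch: detecting the DisruptionTarget condition without the e2e helpers.
// Only k8s.io/api/core/v1 is required; hasDisruptionTarget is an illustrative
// name, not an API added by this commit.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasDisruptionTarget returns the DisruptionTarget condition if it is present
// and true, which is how the kubelet marks a pod it preempted.
func hasDisruptionTarget(status *v1.PodStatus) (*v1.PodCondition, bool) {
	for i := range status.Conditions {
		c := &status.Conditions[i]
		if c.Type == v1.DisruptionTarget && c.Status == v1.ConditionTrue {
			return c, true
		}
	}
	return nil, false
}

func main() {
	// A status shaped like the one the preemption path above produces.
	status := &v1.PodStatus{
		Phase: v1.PodFailed,
		Conditions: []v1.PodCondition{{
			Type:    v1.DisruptionTarget,
			Status:  v1.ConditionTrue,
			Reason:  v1.PodReasonTerminationByKubelet,
			Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
		}},
	}
	if c, ok := hasDisruptionTarget(status); ok {
		fmt.Printf("preempted: reason=%s message=%q\n", c.Reason, c.Message)
	}
}
```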
 