/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package preemption

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"
	"sync/atomic"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	corelisters "k8s.io/client-go/listers/core/v1"
	policylisters "k8s.io/client-go/listers/policy/v1"
	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
	"k8s.io/klog/v2"
	extenderv1 "k8s.io/kube-scheduler/extender/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/metrics"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

// Candidate represents a nominated node on which the preemptor can be scheduled,
// along with the list of victims that should be evicted for the preemptor to fit the node.
type Candidate interface {
	// Victims wraps a list of to-be-preempted Pods and the number of PDB violations.
	Victims() *extenderv1.Victims
	// Name returns the target node name where the preemptor gets nominated to run.
	Name() string
}

type candidate struct {
	victims *extenderv1.Victims
	name    string
}

// Victims returns s.victims.
func (s *candidate) Victims() *extenderv1.Victims {
	return s.victims
}

// Name returns s.name.
func (s *candidate) Name() string {
	return s.name
}

type candidateList struct {
	idx   int32
	items []Candidate
}

func newCandidateList(size int32) *candidateList {
	return &candidateList{idx: -1, items: make([]Candidate, size)}
}

// add adds a new candidate to the internal array atomically.
func (cl *candidateList) add(c *candidate) {
	if idx := atomic.AddInt32(&cl.idx, 1); idx < int32(len(cl.items)) {
		cl.items[idx] = c
	}
}

// size returns the number of candidates stored. Note that some add() operations
// might still be executing when this is called, so care must be taken to
// ensure that all add() operations complete before accessing the elements of
// the list.
func (cl *candidateList) size() int32 {
	n := atomic.LoadInt32(&cl.idx) + 1
	if n >= int32(len(cl.items)) {
		n = int32(len(cl.items))
	}
	return n
}

// get returns the internal candidate array. This function is NOT atomic and
// assumes that all add() operations have been completed.
func (cl *candidateList) get() []Candidate {
	return cl.items[:cl.size()]
}
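
// The index starts at -1 so the first atomic.AddInt32 claims slot 0; writers that
// overflow the fixed-size array simply drop their candidate. A minimal usage sketch
// (illustrative only, not part of the original file):
//
//	cl := newCandidateList(8)
//	var wg sync.WaitGroup
//	for i := 0; i < 16; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			cl.add(&candidate{name: "node-a"}) // at most 8 are kept
//		}()
//	}
//	wg.Wait()    // all add() calls must finish...
//	_ = cl.get() // ...before get() may be called safely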

// Interface is expected to be implemented by different preemption plugins as all those member
// methods might have different behavior compared with the default preemption.
type Interface interface {
	// GetOffsetAndNumCandidates chooses a random offset and calculates the number of candidates that should be
	// shortlisted for dry running preemption.
	GetOffsetAndNumCandidates(nodes int32) (int32, int32)
	// CandidatesToVictimsMap builds a map from the target node to a list of to-be-preempted Pods and the number of PDB violations.
	CandidatesToVictimsMap(candidates []Candidate) map[string]*extenderv1.Victims
	// PodEligibleToPreemptOthers returns one bool and one string. The bool indicates whether this pod should be considered for
	// preempting other pods or not. The string includes the reason if this pod isn't eligible.
	PodEligibleToPreemptOthers(pod *v1.Pod, nominatedNodeStatus *framework.Status) (bool, string)
	// SelectVictimsOnNode finds a minimum set of pods on the given node that should be preempted in order to make enough room
	// for "pod" to be scheduled.
	// Note that both `state` and `nodeInfo` are deep copied.
	SelectVictimsOnNode(ctx context.Context, state *framework.CycleState,
		pod *v1.Pod, nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status)
}
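
// A minimal sketch of an out-of-tree implementation (the type name and its
// trivial policy are invented for illustration; PodEligibleToPreemptOthers and
// SelectVictimsOnNode are elided):
//
//	type naivePreemption struct{}
//
//	// Scan all nodes, starting from the head of the list.
//	func (p *naivePreemption) GetOffsetAndNumCandidates(nodes int32) (int32, int32) {
//		return 0, nodes
//	}
//
//	func (p *naivePreemption) CandidatesToVictimsMap(candidates []Candidate) map[string]*extenderv1.Victims {
//		m := make(map[string]*extenderv1.Victims, len(candidates))
//		for _, c := range candidates {
//			m[c.Name()] = c.Victims()
//		}
//		return m
//	}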

type Evaluator struct {
	PluginName string
	Handler    framework.Handle
	PodLister  corelisters.PodLister
	PdbLister  policylisters.PodDisruptionBudgetLister
	State      *framework.CycleState
	Interface
}
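
// An Evaluator is typically assembled per scheduling cycle (e.g., in a PostFilter
// plugin), embedding the plugin's own Interface implementation. A sketch only;
// the field wiring below is an assumption, not taken from this file:
//
//	ev := &Evaluator{
//		PluginName: "NaivePreemption", // hypothetical plugin name
//		Handler:    fh,                // the framework.Handle given to the plugin factory
//		PodLister:  fh.SharedInformerFactory().Core().V1().Pods().Lister(),
//		PdbLister:  fh.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister(),
//		State:      state,             // the current cycle's CycleState
//		Interface:  &naivePreemption{},
//	}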

// Preempt returns a PostFilterResult carrying a suggested nominatedNodeName, along with a Status.
// The semantics of the returned <PostFilterResult, Status> vary across scenarios:
// - <nil, Error>. This denotes it's a transient/rare error that may be self-healed in future cycles.
// - <nil, Unschedulable>. This status is mostly as expected, e.g., the preemptor is waiting for the
//   victims to be fully terminated.
// - In both cases above, a nil PostFilterResult is returned to keep the pod's nominatedNodeName unchanged.
//
// - <non-nil PostFilterResult, Unschedulable>. It indicates the pod cannot be scheduled even with preemption.
//   In this case, a non-nil PostFilterResult is returned and result.NominatingMode instructs how to deal with
//   the nominatedNodeName.
// - <non-nil PostFilterResult, Success>. It's the regular happy path
//   and the non-empty nominatedNodeName will be applied to the preemptor pod.
func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
	// 0) Fetch the latest version of <pod>.
	// It's safe to directly fetch the pod here because the informer cache has already been
	// initialized when creating the Scheduler obj.
	// However, tests may need to manually initialize the shared pod informer.
	podNamespace, podName := pod.Namespace, pod.Name
	pod, err := ev.PodLister.Pods(pod.Namespace).Get(pod.Name)
	if err != nil {
		klog.ErrorS(err, "Getting the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName))
		return nil, framework.AsStatus(err)
	}

	// 1) Ensure the preemptor is eligible to preempt other pods.
	if ok, msg := ev.PodEligibleToPreemptOthers(pod, m[pod.Status.NominatedNodeName]); !ok {
		klog.V(5).InfoS("Pod is not eligible for preemption", "pod", klog.KObj(pod), "reason", msg)
		return nil, framework.NewStatus(framework.Unschedulable, msg)
	}

	// 2) Find all preemption candidates.
	candidates, nodeToStatusMap, err := ev.findCandidates(ctx, pod, m)
	if err != nil && len(candidates) == 0 {
		return nil, framework.AsStatus(err)
	}

	// Return a FitError only when there are no candidates that fit the pod.
	if len(candidates) == 0 {
		fitError := &framework.FitError{
			Pod:         pod,
			NumAllNodes: len(nodeToStatusMap),
			Diagnosis: framework.Diagnosis{
				NodeToStatusMap: nodeToStatusMap,
				// Leave FailedPlugins as nil as it won't be used on moving Pods.
			},
		}
		// Specify nominatedNodeName to clear the pod's nominatedNodeName status, if applicable.
		return framework.NewPostFilterResultWithNominatedNode(""), framework.NewStatus(framework.Unschedulable, fitError.Error())
	}

	// 3) Interact with registered Extenders to filter out some candidates if needed.
	candidates, status := ev.callExtenders(pod, candidates)
	if !status.IsSuccess() {
		return nil, status
	}

	// 4) Find the best candidate.
	bestCandidate := ev.SelectCandidate(candidates)
	if bestCandidate == nil || len(bestCandidate.Name()) == 0 {
		return nil, framework.NewStatus(framework.Unschedulable, "no candidate node for preemption")
	}

	// 5) Perform preparation work before nominating the selected candidate.
	if status := ev.prepareCandidate(ctx, bestCandidate, pod, ev.PluginName); !status.IsSuccess() {
		return nil, status
	}

	return framework.NewPostFilterResultWithNominatedNode(bestCandidate.Name()), framework.NewStatus(framework.Success)
}
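
// Preempt is designed to be the body of a PostFilter extension point. A sketch of
// such a caller (the plugin type and its fields are assumptions for illustration):
//
//	func (pl *naivePreemptionPlugin) PostFilter(ctx context.Context, state *framework.CycleState,
//		pod *v1.Pod, m framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
//		ev := &Evaluator{
//			PluginName: pl.Name(),
//			Handler:    pl.fh,
//			PodLister:  pl.podLister,
//			PdbLister:  pl.pdbLister,
//			State:      state,
//			Interface:  pl,
//		}
//		return ev.Preempt(ctx, pod, m)
//	}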

// findCandidates calculates a slice of preemption candidates.
// Each candidate is a node where evicting the selected victims would make the given <pod> schedulable.
func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framework.NodeToStatusMap) ([]Candidate, framework.NodeToStatusMap, error) {
	allNodes, err := ev.Handler.SnapshotSharedLister().NodeInfos().List()
	if err != nil {
		return nil, nil, err
	}
	if len(allNodes) == 0 {
		return nil, nil, errors.New("no nodes available")
	}
	potentialNodes, unschedulableNodeStatus := nodesWherePreemptionMightHelp(allNodes, m)
	if len(potentialNodes) == 0 {
		klog.V(3).InfoS("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod))
		// In this case, we should clean up any existing nominated node name of the pod.
		if err := util.ClearNominatedNodeName(ctx, ev.Handler.ClientSet(), pod); err != nil {
			klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field of pod", "pod", klog.KObj(pod))
			// We do not return as this error is not critical.
		}
		return nil, unschedulableNodeStatus, nil
	}

	pdbs, err := getPodDisruptionBudgets(ev.PdbLister)
	if err != nil {
		return nil, nil, err
	}

	offset, numCandidates := ev.GetOffsetAndNumCandidates(int32(len(potentialNodes)))
	if klogV := klog.V(5); klogV.Enabled() {
		var sample []string
		for i := offset; i < offset+10 && i < int32(len(potentialNodes)); i++ {
			sample = append(sample, potentialNodes[i].Node().Name)
		}
		klogV.InfoS("Selecting candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates)
	}
	candidates, nodeStatuses, err := ev.DryRunPreemption(ctx, pod, potentialNodes, pdbs, offset, numCandidates)
	for node, nodeStatus := range unschedulableNodeStatus {
		nodeStatuses[node] = nodeStatus
	}
	return candidates, nodeStatuses, err
}
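
// The random offset spreads repeated dry runs across the node list instead of
// always starting at the same prefix, and numCandidates caps how much work
// DryRunPreemption does. One possible policy, simplified and assumed rather than
// the default plugin's exact behavior (the type is hypothetical; rand is math/rand):
//
//	func (p *sampledPreemption) GetOffsetAndNumCandidates(nodes int32) (int32, int32) {
//		offset := rand.Int31n(nodes) // random starting point in the pool
//		numCandidates := nodes / 10  // shortlist roughly 10% of the pool
//		if numCandidates < 1 {
//			numCandidates = 1
//		}
//		return offset, numCandidates
//	}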

// callExtenders calls the given <extenders> to select the list of feasible candidates.
// We will only check <candidates> with extenders that support preemption.
// Extenders which do not support preemption may later prevent the preemptor from being scheduled on the nominated
// node. In that case, the scheduler will find a different host for the preemptor in subsequent scheduling cycles.
func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candidate, *framework.Status) {
	extenders := ev.Handler.Extenders()
	nodeLister := ev.Handler.SnapshotSharedLister().NodeInfos()
	if len(extenders) == 0 {
		return candidates, nil
	}

	// Migrate the candidate slice to victimsMap to adapt to the Extender interface.
	// It's only applicable to candidate slices whose elements have unique nominated node names.
	victimsMap := ev.CandidatesToVictimsMap(candidates)
	if len(victimsMap) == 0 {
		return candidates, nil
	}
	for _, extender := range extenders {
		if !extender.SupportsPreemption() || !extender.IsInterested(pod) {
			continue
		}
		nodeNameToVictims, err := extender.ProcessPreemption(pod, victimsMap, nodeLister)
		if err != nil {
			if extender.IsIgnorable() {
				klog.InfoS("Skipping extender as it returned error and has ignorable flag set",
					"extender", extender, "err", err)
				continue
			}
			return nil, framework.AsStatus(err)
		}
		// Check if the returned victims are valid.
		for nodeName, victims := range nodeNameToVictims {
			if victims == nil || len(victims.Pods) == 0 {
				if extender.IsIgnorable() {
					delete(nodeNameToVictims, nodeName)
					klog.InfoS("Ignoring node without victims", "node", klog.KRef("", nodeName))
					continue
				}
				return nil, framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName))
			}
		}

		// Replace victimsMap with the new result after preemption, so the
		// remaining extenders can continue using it as a parameter.
		victimsMap = nodeNameToVictims

		// If the node list becomes empty, no preemption can happen regardless of other extenders.
		if len(victimsMap) == 0 {
			break
		}
	}

	var newCandidates []Candidate
	for nodeName := range victimsMap {
		newCandidates = append(newCandidates, &candidate{
			victims: victimsMap[nodeName],
			name:    nodeName,
		})
	}
	return newCandidates, nil
}

// SelectCandidate chooses the best-fit candidate from the given <candidates> and returns it.
// NOTE: This method is exported for easier testing in default preemption.
func (ev *Evaluator) SelectCandidate(candidates []Candidate) Candidate {
	if len(candidates) == 0 {
		return nil
	}
	if len(candidates) == 1 {
		return candidates[0]
	}

	victimsMap := ev.CandidatesToVictimsMap(candidates)
	candidateNode := pickOneNodeForPreemption(victimsMap)

	// Same as CandidatesToVictimsMap, this logic is not applicable to out-of-tree
	// preemption plugins that exercise different candidates on the same nominated node.
	if victims := victimsMap[candidateNode]; victims != nil {
		return &candidate{
			victims: victims,
			name:    candidateNode,
		}
	}

	// We shouldn't reach here.
	klog.ErrorS(errors.New("no candidate selected"), "Should not reach here", "candidates", candidates)
	// To not break the whole flow, return the first candidate.
	return candidates[0]
}

// prepareCandidate does some preparation work before nominating the selected candidate:
// - Evict the victim pods
// - Reject the victim pods if they are in the waitingPod map
// - Clear the low-priority pods' nominatedNodeName status if needed
func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.Pod, pluginName string) *framework.Status {
	fh := ev.Handler
	cs := ev.Handler.ClientSet()
	for _, victim := range c.Victims().Pods {
		// If the victim is a WaitingPod, send a reject message to the PermitPlugin.
		// Otherwise we should delete the victim.
		if waitingPod := fh.GetWaitingPod(victim.UID); waitingPod != nil {
			waitingPod.Reject(pluginName, "preempted")
		} else if err := util.DeletePod(ctx, cs, victim); err != nil {
			klog.ErrorS(err, "Preempting pod", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod))
			return framework.AsStatus(err)
		}
		fh.EventRecorder().Eventf(victim, pod, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by %v/%v on node %v",
			pod.Namespace, pod.Name, c.Name())
	}
	metrics.PreemptionVictims.Observe(float64(len(c.Victims().Pods)))

	// Lower-priority pods nominated to run on this node may no longer fit on
	// this node, so we should remove their nomination. Removing their
	// nomination updates these pods and moves them to the active queue. It
	// lets the scheduler find another place for them.
	nominatedPods := getLowerPriorityNominatedPods(fh, pod, c.Name())
	if err := util.ClearNominatedNodeName(ctx, cs, nominatedPods...); err != nil {
		klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field")
		// We do not return as this error is not critical.
	}

	return nil
}

// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
// that may be satisfied by removing pods from the node.
func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, m framework.NodeToStatusMap) ([]*framework.NodeInfo, framework.NodeToStatusMap) {
	var potentialNodes []*framework.NodeInfo
	nodeStatuses := make(framework.NodeToStatusMap)
	for _, node := range nodes {
		name := node.Node().Name
		// We rely on the status recorded by each plugin ('Unschedulable' or 'UnschedulableAndUnresolvable')
		// to determine whether preemption may help on the node.
		if m[name].Code() == framework.UnschedulableAndUnresolvable {
			nodeStatuses[node.Node().Name] = framework.NewStatus(framework.UnschedulableAndUnresolvable, "Preemption is not helpful for scheduling")
			continue
		}
		potentialNodes = append(potentialNodes, node)
	}
	return potentialNodes, nodeStatuses
}
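
// For example, a node that failed Filter due to insufficient CPU comes back as
// Unschedulable and stays in the potential list, because evicting pods can free
// CPU; a node that failed a node-selector or taint check comes back as
// UnschedulableAndUnresolvable and is skipped, because no eviction changes that.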

func getPodDisruptionBudgets(pdbLister policylisters.PodDisruptionBudgetLister) ([]*policy.PodDisruptionBudget, error) {
	if pdbLister != nil {
		return pdbLister.List(labels.Everything())
	}
	return nil, nil
}

// pickOneNodeForPreemption chooses one node among the given nodes. It assumes
// pods in each map entry are ordered by decreasing priority.
// It picks a node based on the following criteria:
// 1. A node with the minimum number of PDB violations.
// 2. A node with the minimum highest-priority victim is picked.
// 3. Ties are broken by the sum of priorities of all victims.
// 4. If there are still ties, the node with the minimum number of victims is picked.
// 5. If there are still ties, the node with the latest start time of all highest-priority victims is picked.
// 6. If there are still ties, the first such node is picked (sort of randomly).
// 'minNodes1' and 'minNodes2' are reused here to save memory
// allocation and garbage collection time.
func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) string {
	if len(nodesToVictims) == 0 {
		return ""
	}
	minNumPDBViolatingPods := int64(math.MaxInt32)
	var minNodes1 []string
	lenNodes1 := 0
	for node, victims := range nodesToVictims {
		numPDBViolatingPods := victims.NumPDBViolations
		if numPDBViolatingPods < minNumPDBViolatingPods {
			minNumPDBViolatingPods = numPDBViolatingPods
			minNodes1 = nil
			lenNodes1 = 0
		}
		if numPDBViolatingPods == minNumPDBViolatingPods {
			minNodes1 = append(minNodes1, node)
			lenNodes1++
		}
	}
	if lenNodes1 == 1 {
		return minNodes1[0]
	}

	// There is more than one node with the minimum number of PDB-violating pods.
	// Find the one with the minimum highest-priority victim.
	minHighestPriority := int32(math.MaxInt32)
	var minNodes2 = make([]string, lenNodes1)
	lenNodes2 := 0
	for i := 0; i < lenNodes1; i++ {
		node := minNodes1[i]
		victims := nodesToVictims[node]
		// highestPodPriority is the highest priority among the victims on this node.
		highestPodPriority := corev1helpers.PodPriority(victims.Pods[0])
		if highestPodPriority < minHighestPriority {
			minHighestPriority = highestPodPriority
			lenNodes2 = 0
		}
		if highestPodPriority == minHighestPriority {
			minNodes2[lenNodes2] = node
			lenNodes2++
		}
	}
	if lenNodes2 == 1 {
		return minNodes2[0]
	}

	// There are a few nodes with the minimum highest-priority victim. Find the
	// smallest sum of priorities.
	minSumPriorities := int64(math.MaxInt64)
	lenNodes1 = 0
	for i := 0; i < lenNodes2; i++ {
		var sumPriorities int64
		node := minNodes2[i]
		for _, pod := range nodesToVictims[node].Pods {
			// We add MaxInt32+1 to all priorities to make all of them >= 0. This is
			// needed so that a node with a few pods with negative priority is not
			// picked over a node with a smaller number of pods with the same negative
			// priority (and similar scenarios).
			sumPriorities += int64(corev1helpers.PodPriority(pod)) + int64(math.MaxInt32+1)
		}
		if sumPriorities < minSumPriorities {
			minSumPriorities = sumPriorities
			lenNodes1 = 0
		}
		if sumPriorities == minSumPriorities {
			minNodes1[lenNodes1] = node
			lenNodes1++
		}
	}
	if lenNodes1 == 1 {
		return minNodes1[0]
	}

	// There are a few nodes with the minimum highest-priority victim and sum of priorities.
	// Find one with the minimum number of pods.
	minNumPods := math.MaxInt32
	lenNodes2 = 0
	for i := 0; i < lenNodes1; i++ {
		node := minNodes1[i]
		numPods := len(nodesToVictims[node].Pods)
		if numPods < minNumPods {
			minNumPods = numPods
			lenNodes2 = 0
		}
		if numPods == minNumPods {
			minNodes2[lenNodes2] = node
			lenNodes2++
		}
	}
	if lenNodes2 == 1 {
		return minNodes2[0]
	}

	// There are a few nodes with the same number of pods.
	// Find the node that satisfies latest(earliestStartTime(all highest-priority pods on node)).
	latestStartTime := util.GetEarliestPodStartTime(nodesToVictims[minNodes2[0]])
	if latestStartTime == nil {
		// If the earliest start time of all pods on the 1st node is nil, just return it,
		// which is not expected to happen.
		klog.ErrorS(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", klog.KRef("", minNodes2[0]))
		return minNodes2[0]
	}
	nodeToReturn := minNodes2[0]
	for i := 1; i < lenNodes2; i++ {
		node := minNodes2[i]
		// Get the earliest start time of all pods on the current node.
		earliestStartTimeOnNode := util.GetEarliestPodStartTime(nodesToVictims[node])
		if earliestStartTimeOnNode == nil {
			klog.ErrorS(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", klog.KRef("", node))
			continue
		}
		if earliestStartTimeOnNode.After(latestStartTime.Time) {
			latestStartTime = earliestStartTimeOnNode
			nodeToReturn = node
		}
	}

	return nodeToReturn
}
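
// Worked example with invented numbers: suppose node A's victims have priorities
// {100, 10} and node B's have {50, 40, 30}, each violating zero PDBs. Criterion 1
// ties, so criterion 2 compares the highest-priority victim per node (100 vs 50)
// and picks B: preempting there disturbs only lower-priority workloads.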

// getLowerPriorityNominatedPods returns pods whose priority is smaller than the
// priority of the given "pod" and are nominated to run on the given node.
// Note: We could possibly check if the nominated lower-priority pods still fit
// and return those that no longer fit, but that would require lots of
// manipulation of NodeInfo and PreFilter state per nominated pod. It may not be
// worth the complexity, especially because we generally expect to have a very
// small number of nominated pods per node.
func getLowerPriorityNominatedPods(pn framework.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod {
	podInfos := pn.NominatedPodsForNode(nodeName)

	if len(podInfos) == 0 {
		return nil
	}

	var lowerPriorityPods []*v1.Pod
	podPriority := corev1helpers.PodPriority(pod)
	for _, pi := range podInfos {
		if corev1helpers.PodPriority(pi.Pod) < podPriority {
			lowerPriorityPods = append(lowerPriorityPods, pi.Pod)
		}
	}
	return lowerPriorityPods
}

// DryRunPreemption simulates preemption logic on <potentialNodes> in parallel,
// and returns preemption candidates and a map indicating filtered node statuses.
// The number of candidates depends on the constraints defined in the plugin's args. In the returned list of
// candidates, ones that do not violate PDBs are preferred over ones that do.
// NOTE: This method is exported for easier testing in default preemption.
func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentialNodes []*framework.NodeInfo,
	pdbs []*policy.PodDisruptionBudget, offset int32, numCandidates int32) ([]Candidate, framework.NodeToStatusMap, error) {
	fh := ev.Handler
	nonViolatingCandidates := newCandidateList(numCandidates)
	violatingCandidates := newCandidateList(numCandidates)
	parallelCtx, cancel := context.WithCancel(ctx)
	nodeStatuses := make(framework.NodeToStatusMap)
	var statusesLock sync.Mutex
	var errs []error
	checkNode := func(i int) {
		nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Clone()
		stateCopy := ev.State.Clone()
		pods, numPDBViolations, status := ev.SelectVictimsOnNode(ctx, stateCopy, pod, nodeInfoCopy, pdbs)
		if status.IsSuccess() && len(pods) != 0 {
			victims := extenderv1.Victims{
				Pods:             pods,
				NumPDBViolations: int64(numPDBViolations),
			}
			c := &candidate{
				victims: &victims,
				name:    nodeInfoCopy.Node().Name,
			}
			if numPDBViolations == 0 {
				nonViolatingCandidates.add(c)
			} else {
				violatingCandidates.add(c)
			}
			nvcSize, vcSize := nonViolatingCandidates.size(), violatingCandidates.size()
			if nvcSize > 0 && nvcSize+vcSize >= numCandidates {
				cancel()
			}
			return
		}
		if status.IsSuccess() && len(pods) == 0 {
			status = framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeInfoCopy.Node().Name))
		}
		statusesLock.Lock()
		if status.Code() == framework.Error {
			errs = append(errs, status.AsError())
		}
		nodeStatuses[nodeInfoCopy.Node().Name] = status
		statusesLock.Unlock()
	}
	fh.Parallelizer().Until(parallelCtx, len(potentialNodes), checkNode)
	return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses, utilerrors.NewAggregate(errs)
}
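
// Each worker clones both the node snapshot and the cycle state before the dry run,
// so SelectVictimsOnNode can mutate them freely. Once enough candidates have been
// collected, including at least one that violates no PDB, cancel() stops the
// remaining workers early through parallelCtx.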