/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file contains structures that implement scheduling queue types.
// Scheduling queues hold pods waiting to be scheduled. This file implements a
// priority queue which has three sub-queues. One sub-queue holds pods that are
// being considered for scheduling. This is called activeQ. Another sub-queue
// holds pods that have already been tried and are determined to be
// unschedulable; it is called unschedulableQ. The third, podBackoffQ, holds
// pods that are waiting for their backoff period to expire.

package queue

import (
	"fmt"
	"reflect"
	"sync"
	"time"

	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ktypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/internal/heap"
	"k8s.io/kubernetes/pkg/scheduler/metrics"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

const (
	// If the pod stays in unschedulableQ longer than the unschedulableQTimeInterval,
	// the pod will be moved from unschedulableQ to activeQ.
	unschedulableQTimeInterval = 60 * time.Second

	queueClosed = "scheduling queue is closed"
)

const (
	// DefaultPodInitialBackoffDuration is the default value for the initial backoff duration
	// for unschedulable pods. To change the default podInitialBackoffDurationSeconds used by the
	// scheduler, update the ComponentConfig value in defaults.go.
	DefaultPodInitialBackoffDuration time.Duration = 1 * time.Second
	// DefaultPodMaxBackoffDuration is the default value for the max backoff duration
	// for unschedulable pods. To change the default podMaxBackoffDurationSeconds used by the
	// scheduler, update the ComponentConfig value in defaults.go.
	DefaultPodMaxBackoffDuration time.Duration = 10 * time.Second
)

// SchedulingQueue is an interface for a queue to store pods waiting to be scheduled.
// The interface follows a pattern similar to cache.FIFO and cache.Heap and
// makes it easy to use those data structures as a SchedulingQueue.
type SchedulingQueue interface {
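	// Add adds a new pod to the activeQ. It should be called only when a new
	// pod is added, i.e. the pod is not yet in any of the sub-queues.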
	Add(pod *v1.Pod) error
	// AddUnschedulableIfNotPresent adds an unschedulable pod back to the scheduling queue.
	// The podSchedulingCycle represents the current scheduling cycle number which can be
	// returned by calling SchedulingCycle().
	AddUnschedulableIfNotPresent(pod *framework.QueuedPodInfo, podSchedulingCycle int64) error
	// SchedulingCycle returns the current scheduling cycle number, which is
	// cached by the scheduling queue. Normally, incrementing this number whenever
	// a pod is popped (i.e. whenever Pop() is called) is enough.
	SchedulingCycle() int64
	// Pop removes the head of the queue and returns it. It blocks if the
	// queue is empty and waits until a new item is added to the queue.
	Pop() (*framework.QueuedPodInfo, error)
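	// Update updates the pod in whichever sub-queue currently holds it; if the
	// pod is not present in any sub-queue, it is added to the activeQ.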
	Update(oldPod, newPod *v1.Pod) error
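	// Delete removes the pod from whichever sub-queue holds it.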
	Delete(pod *v1.Pod) error
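	// MoveAllToActiveOrBackoffQueue moves all pods in unschedulableQ to the
	// activeQ or, if they are still backing off, to the podBackoffQ.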
	MoveAllToActiveOrBackoffQueue(event string)
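	// AssignedPodAdded is called when a bound pod is added. It may make
	// pending pods with matching affinity terms schedulable.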
	AssignedPodAdded(pod *v1.Pod)
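	// AssignedPodUpdated is called when a bound pod is updated. It may make
	// pending pods with matching affinity terms schedulable.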
	AssignedPodUpdated(pod *v1.Pod)
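	// NominatedPodsForNode returns pods that are nominated to run on the
	// given node.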
	NominatedPodsForNode(nodeName string) []*v1.Pod
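	// PendingPods returns all the pods pending in the queue; it is used for
	// debugging purposes.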
	PendingPods() []*v1.Pod
	// Close closes the SchedulingQueue so that the goroutine which is
	// waiting to pop items can exit gracefully.
	Close()
	// UpdateNominatedPodForNode adds the given pod to the nominated pod map or
	// updates it if it already exists.
	UpdateNominatedPodForNode(pod *v1.Pod, nodeName string)
	// DeleteNominatedPodIfExists deletes nominatedPod from the internal cache.
	DeleteNominatedPodIfExists(pod *v1.Pod)
	// NumUnschedulablePods returns the number of unschedulable pods that exist
	// in the SchedulingQueue.
	NumUnschedulablePods() int
	// Run starts the goroutines managing the queue.
	Run()
}

// NewSchedulingQueue initializes a priority queue as a new scheduling queue.
func NewSchedulingQueue(lessFn framework.LessFunc, opts ...Option) SchedulingQueue {
	return NewPriorityQueue(lessFn, opts...)
}

// NominatedNodeName returns the nominated node name of a Pod.
func NominatedNodeName(pod *v1.Pod) string {
	return pod.Status.NominatedNodeName
}

// PriorityQueue implements a scheduling queue.
// The head of PriorityQueue is the highest priority pending pod. This structure
// has three sub-queues. One sub-queue holds pods that are being considered for
// scheduling. This is called activeQ and is a Heap. Another queue holds
// pods that have already been tried and are determined to be unschedulable. The
// latter is called unschedulableQ. The third queue holds pods that are moved from
// unschedulable queues and will be moved to the active queue when their backoff
// periods complete.
type PriorityQueue struct {
	stop  chan struct{}
	clock util.Clock

	// pod initial backoff duration.
	podInitialBackoffDuration time.Duration
	// pod maximum backoff duration.
	podMaxBackoffDuration time.Duration

	lock sync.RWMutex
	cond sync.Cond

	// activeQ is the heap structure that the scheduler actively looks at to find
	// pods to schedule. The head of the heap is the highest priority pod.
	activeQ *heap.Heap
	// podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff
	// are popped from this heap before the scheduler looks at activeQ.
	podBackoffQ *heap.Heap
	// unschedulableQ holds pods that have been tried and determined unschedulable.
	unschedulableQ *UnschedulablePodsMap
	// nominatedPods is a structure that stores pods which are nominated to run
	// on nodes.
	nominatedPods *nominatedPodMap
	// schedulingCycle represents the sequence number of the scheduling cycle and is
	// incremented when a pod is popped.
	schedulingCycle int64
	// moveRequestCycle caches the sequence number of the scheduling cycle when we
	// received a move request. Unschedulable pods in and before this scheduling
	// cycle will be put back to activeQ if we were trying to schedule them
	// when we received the move request.
	moveRequestCycle int64

	// closed indicates that the queue is closed.
	// It is mainly used to let Pop() exit its control loop while waiting for an item.
	closed bool
}

type priorityQueueOptions struct {
	clock                     util.Clock
	podInitialBackoffDuration time.Duration
	podMaxBackoffDuration     time.Duration
}

// Option configures a PriorityQueue.
type Option func(*priorityQueueOptions)

// WithClock sets the clock for PriorityQueue. The default clock is util.RealClock.
func WithClock(clock util.Clock) Option {
	return func(o *priorityQueueOptions) {
		o.clock = clock
	}
}

// WithPodInitialBackoffDuration sets the pod initial backoff duration for PriorityQueue.
func WithPodInitialBackoffDuration(duration time.Duration) Option {
	return func(o *priorityQueueOptions) {
		o.podInitialBackoffDuration = duration
	}
}

// WithPodMaxBackoffDuration sets the pod max backoff duration for PriorityQueue.
func WithPodMaxBackoffDuration(duration time.Duration) Option {
	return func(o *priorityQueueOptions) {
		o.podMaxBackoffDuration = duration
	}
}

var defaultPriorityQueueOptions = priorityQueueOptions{
	clock:                     util.RealClock{},
	podInitialBackoffDuration: DefaultPodInitialBackoffDuration,
	podMaxBackoffDuration:     DefaultPodMaxBackoffDuration,
}

// Making sure that PriorityQueue implements SchedulingQueue.
var _ SchedulingQueue = &PriorityQueue{}

// newQueuedPodInfoNoTimestamp builds a QueuedPodInfo object without timestamp.
func newQueuedPodInfoNoTimestamp(pod *v1.Pod) *framework.QueuedPodInfo {
	return &framework.QueuedPodInfo{
		Pod: pod,
	}
}

// NewPriorityQueue creates a PriorityQueue object.
func NewPriorityQueue(
	lessFn framework.LessFunc,
	opts ...Option,
) *PriorityQueue {
	options := defaultPriorityQueueOptions
	for _, opt := range opts {
		opt(&options)
	}

	comp := func(podInfo1, podInfo2 interface{}) bool {
		pInfo1 := podInfo1.(*framework.QueuedPodInfo)
		pInfo2 := podInfo2.(*framework.QueuedPodInfo)
		return lessFn(pInfo1, pInfo2)
	}

	pq := &PriorityQueue{
		clock:                     options.clock,
		stop:                      make(chan struct{}),
		podInitialBackoffDuration: options.podInitialBackoffDuration,
		podMaxBackoffDuration:     options.podMaxBackoffDuration,
		activeQ:                   heap.NewWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()),
		unschedulableQ:            newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()),
		nominatedPods:             newNominatedPodMap(),
		moveRequestCycle:          -1,
	}
	pq.cond.L = &pq.lock
	pq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, pq.podsCompareBackoffCompleted, metrics.NewBackoffPodsRecorder())

	return pq
}

// Run starts the goroutines that move pods from podBackoffQ and unschedulableQ
// into activeQ.
func (p *PriorityQueue) Run() {
	go wait.Until(p.flushBackoffQCompleted, 1.0*time.Second, p.stop)
	go wait.Until(p.flushUnschedulableQLeftover, 30*time.Second, p.stop)
}

// Add adds a pod to the active queue. It should be called only when a new pod
// is added so there is no chance the pod is already in active/unschedulable/backoff queues.
func (p *PriorityQueue) Add(pod *v1.Pod) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	pInfo := p.newQueuedPodInfo(pod)
	if err := p.activeQ.Add(pInfo); err != nil {
		klog.Errorf("Error adding pod %v to the scheduling queue: %v", nsNameForPod(pod), err)
		return err
	}
	if p.unschedulableQ.get(pod) != nil {
		klog.Errorf("Error: pod %v is already in the unschedulable queue.", nsNameForPod(pod))
		p.unschedulableQ.delete(pod)
	}
	// Delete pod from backoffQ if it is backing off.
	if err := p.podBackoffQ.Delete(pInfo); err == nil {
		klog.Errorf("Error: pod %v is already in the podBackoff queue.", nsNameForPod(pod))
	}
	metrics.SchedulerQueueIncomingPods.WithLabelValues("active", PodAdd).Inc()
	p.nominatedPods.add(pod, "")
	p.cond.Broadcast()

	return nil
}

// nsNameForPod returns a NamespacedName for the given pod.
func nsNameForPod(pod *v1.Pod) ktypes.NamespacedName {
	return ktypes.NamespacedName{
		Namespace: pod.Namespace,
		Name:      pod.Name,
	}
}

// isPodBackingoff returns true if a pod is still waiting for its backoff timer.
// If this returns true, the pod should not be re-tried.
func (p *PriorityQueue) isPodBackingoff(podInfo *framework.QueuedPodInfo) bool {
	boTime := p.getBackoffTime(podInfo)
	return boTime.After(p.clock.Now())
}

// SchedulingCycle returns the current scheduling cycle number.
func (p *PriorityQueue) SchedulingCycle() int64 {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.schedulingCycle
}

// AddUnschedulableIfNotPresent inserts a pod that cannot be scheduled into
// the queue, unless it is already in the queue. Normally, PriorityQueue puts
// unschedulable pods in `unschedulableQ`. But if there has been a recent move
// request, then the pod is put in `podBackoffQ`.
func (p *PriorityQueue) AddUnschedulableIfNotPresent(pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	pod := pInfo.Pod
	if p.unschedulableQ.get(pod) != nil {
		return fmt.Errorf("pod %v is already present in the unschedulable queue", nsNameForPod(pod))
	}

	// Refresh the timestamp since the pod is re-added.
	pInfo.Timestamp = p.clock.Now()
	if _, exists, _ := p.activeQ.Get(pInfo); exists {
		return fmt.Errorf("pod %v is already present in the active queue", nsNameForPod(pod))
	}
	if _, exists, _ := p.podBackoffQ.Get(pInfo); exists {
		return fmt.Errorf("pod %v is already present in the backoff queue", nsNameForPod(pod))
	}

	// If a move request has been received, move it to the backoff queue; otherwise move
	// it to unschedulableQ.
	if p.moveRequestCycle >= podSchedulingCycle {
		if err := p.podBackoffQ.Add(pInfo); err != nil {
			return fmt.Errorf("error adding pod %v to the backoff queue: %v", pod.Name, err)
		}
		metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", ScheduleAttemptFailure).Inc()
	} else {
		p.unschedulableQ.addOrUpdate(pInfo)
		metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", ScheduleAttemptFailure).Inc()
	}

	p.nominatedPods.add(pod, "")
	return nil
}

// flushBackoffQCompleted moves all pods from backoffQ which have completed backoff into activeQ.
func (p *PriorityQueue) flushBackoffQCompleted() {
	p.lock.Lock()
	defer p.lock.Unlock()
	for {
		rawPodInfo := p.podBackoffQ.Peek()
		if rawPodInfo == nil {
			return
		}
		pod := rawPodInfo.(*framework.QueuedPodInfo).Pod
		boTime := p.getBackoffTime(rawPodInfo.(*framework.QueuedPodInfo))
		if boTime.After(p.clock.Now()) {
			return
		}
		_, err := p.podBackoffQ.Pop()
		if err != nil {
			klog.Errorf("Unable to pop pod %v from backoff queue despite backoff completion.", nsNameForPod(pod))
			return
		}
		p.activeQ.Add(rawPodInfo)
		metrics.SchedulerQueueIncomingPods.WithLabelValues("active", BackoffComplete).Inc()
		defer p.cond.Broadcast()
	}
}

// flushUnschedulableQLeftover moves pods that have stayed in unschedulableQ longer than
// unschedulableQTimeInterval to activeQ.
func (p *PriorityQueue) flushUnschedulableQLeftover() {
	p.lock.Lock()
	defer p.lock.Unlock()

	var podsToMove []*framework.QueuedPodInfo
	currentTime := p.clock.Now()
	for _, pInfo := range p.unschedulableQ.podInfoMap {
		lastScheduleTime := pInfo.Timestamp
		if currentTime.Sub(lastScheduleTime) > unschedulableQTimeInterval {
			podsToMove = append(podsToMove, pInfo)
		}
	}

	if len(podsToMove) > 0 {
		p.movePodsToActiveOrBackoffQueue(podsToMove, UnschedulableTimeout)
	}
}

// Pop removes the head of the active queue and returns it. It blocks if the
// activeQ is empty and waits until a new item is added to the queue. It
// increments the scheduling cycle when a pod is popped.
func (p *PriorityQueue) Pop() (*framework.QueuedPodInfo, error) {
	p.lock.Lock()
	defer p.lock.Unlock()
	for p.activeQ.Len() == 0 {
		// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
		// When Close() is called, p.closed is set and the condition is broadcast,
		// which causes this loop to continue and return from Pop().
		if p.closed {
			return nil, fmt.Errorf(queueClosed)
		}
		p.cond.Wait()
	}
	obj, err := p.activeQ.Pop()
	if err != nil {
		return nil, err
	}
	pInfo := obj.(*framework.QueuedPodInfo)
	pInfo.Attempts++
	p.schedulingCycle++
	return pInfo, err
}

// isPodUpdated checks if the pod is updated in a way that it may have become
// schedulable. It drops the status of the pod and compares it with the old version.
func isPodUpdated(oldPod, newPod *v1.Pod) bool {
	strip := func(pod *v1.Pod) *v1.Pod {
		p := pod.DeepCopy()
		p.ResourceVersion = ""
		p.Generation = 0
		p.Status = v1.PodStatus{}
		return p
	}
	return !reflect.DeepEqual(strip(oldPod), strip(newPod))
}

// Update updates a pod in the active or backoff queue if present. Otherwise, it removes
// the item from the unschedulable queue if the pod is updated in a way that it may
// become schedulable and adds the updated one to the active queue.
// If the pod is not present in any of the queues, it is added to the active queue.
func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	if oldPod != nil {
		oldPodInfo := newQueuedPodInfoNoTimestamp(oldPod)
		// If the pod is already in the active queue, just update it there.
		if oldPodInfo, exists, _ := p.activeQ.Get(oldPodInfo); exists {
			p.nominatedPods.update(oldPod, newPod)
			err := p.activeQ.Update(updatePod(oldPodInfo, newPod))
			return err
		}

		// If the pod is in the backoff queue, update it there.
		if oldPodInfo, exists, _ := p.podBackoffQ.Get(oldPodInfo); exists {
			p.nominatedPods.update(oldPod, newPod)
			p.podBackoffQ.Delete(oldPodInfo)
			err := p.activeQ.Add(updatePod(oldPodInfo, newPod))
			if err == nil {
				p.cond.Broadcast()
			}
			return err
		}
	}

	// If the pod is in the unschedulable queue, updating it may make it schedulable.
	if usPodInfo := p.unschedulableQ.get(newPod); usPodInfo != nil {
		p.nominatedPods.update(oldPod, newPod)
		if isPodUpdated(oldPod, newPod) {
			p.unschedulableQ.delete(usPodInfo.Pod)
			err := p.activeQ.Add(updatePod(usPodInfo, newPod))
			if err == nil {
				p.cond.Broadcast()
			}
			return err
		}
		// Pod is already in the unschedulable queue and hasn't been updated; no need to back off again.
		p.unschedulableQ.addOrUpdate(updatePod(usPodInfo, newPod))
		return nil
	}
	// If the pod is not in any of the queues, we put it in the active queue.
	err := p.activeQ.Add(p.newQueuedPodInfo(newPod))
	if err == nil {
		p.nominatedPods.add(newPod, "")
		p.cond.Broadcast()
	}
	return err
}

// Delete deletes the item from any of the queues. It assumes the pod is
// only in one queue.
func (p *PriorityQueue) Delete(pod *v1.Pod) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.nominatedPods.delete(pod)
	err := p.activeQ.Delete(newQueuedPodInfoNoTimestamp(pod))
	if err != nil { // The item was probably not found in the activeQ.
		p.podBackoffQ.Delete(newQueuedPodInfoNoTimestamp(pod))
		p.unschedulableQ.delete(pod)
	}
	return nil
}

// AssignedPodAdded is called when a bound pod is added. Creation of this pod
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) {
	p.lock.Lock()
	p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodAdd)
	p.lock.Unlock()
}

// AssignedPodUpdated is called when a bound pod is updated. Change of labels
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) {
	p.lock.Lock()
	p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate)
	p.lock.Unlock()
}

// MoveAllToActiveOrBackoffQueue moves all pods from unschedulableQ to activeQ or backoffQ.
// This function adds all pods and then signals the condition variable to ensure that
// if Pop() is waiting for an item, it receives it after all the pods are in the
// queue and the head is the highest priority pod.
func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(event string) {
	p.lock.Lock()
	defer p.lock.Unlock()
	unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulableQ.podInfoMap))
	for _, pInfo := range p.unschedulableQ.podInfoMap {
		unschedulablePods = append(unschedulablePods, pInfo)
	}
	p.movePodsToActiveOrBackoffQueue(unschedulablePods, event)
}

// NOTE: this function assumes the lock has been acquired by the caller.
func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework.QueuedPodInfo, event string) {
	for _, pInfo := range podInfoList {
		pod := pInfo.Pod
		if p.isPodBackingoff(pInfo) {
			if err := p.podBackoffQ.Add(pInfo); err != nil {
				klog.Errorf("Error adding pod %v to the backoff queue: %v", pod.Name, err)
			} else {
				metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc()
				p.unschedulableQ.delete(pod)
			}
		} else {
			if err := p.activeQ.Add(pInfo); err != nil {
				klog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err)
			} else {
				metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event).Inc()
				p.unschedulableQ.delete(pod)
			}
		}
	}
	p.moveRequestCycle = p.schedulingCycle
	p.cond.Broadcast()
}

// getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have
// any affinity term that matches "pod".
// NOTE: this function assumes the lock has been acquired by the caller.
func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*framework.QueuedPodInfo {
	var podsToMove []*framework.QueuedPodInfo
	for _, pInfo := range p.unschedulableQ.podInfoMap {
		up := pInfo.Pod
		terms := util.GetPodAffinityTerms(up.Spec.Affinity)
		for _, term := range terms {
			namespaces := util.GetNamespacesFromPodAffinityTerm(up, &term)
			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
			if err != nil {
				klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
			}
			if util.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
				podsToMove = append(podsToMove, pInfo)
				break
			}
		}
	}
	return podsToMove
}

// NominatedPodsForNode returns pods that are nominated to run on the given node,
// but they are waiting for other pods to be removed from the node before they
// can be actually scheduled.
func (p *PriorityQueue) NominatedPodsForNode(nodeName string) []*v1.Pod {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.nominatedPods.podsForNode(nodeName)
}

// PendingPods returns all the pending pods in the queue. This function is
// used for debugging purposes in the scheduler cache dumper and comparer.
func (p *PriorityQueue) PendingPods() []*v1.Pod {
	p.lock.RLock()
	defer p.lock.RUnlock()
	var result []*v1.Pod
	for _, pInfo := range p.activeQ.List() {
		result = append(result, pInfo.(*framework.QueuedPodInfo).Pod)
	}
	for _, pInfo := range p.podBackoffQ.List() {
		result = append(result, pInfo.(*framework.QueuedPodInfo).Pod)
	}
	for _, pInfo := range p.unschedulableQ.podInfoMap {
		result = append(result, pInfo.Pod)
	}
	return result
}

// Close closes the priority queue.
func (p *PriorityQueue) Close() {
	p.lock.Lock()
	defer p.lock.Unlock()
	close(p.stop)
	p.closed = true
	p.cond.Broadcast()
}

// DeleteNominatedPodIfExists deletes the pod from the internal nominatedPods map.
func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) {
	p.lock.Lock()
	p.nominatedPods.delete(pod)
	p.lock.Unlock()
}

// UpdateNominatedPodForNode adds a pod to the nominated pods of the given node.
// This is called during the preemption process after a node is nominated to run
// the pod. We update the structure before sending a request to update the pod
// object to avoid races with the following scheduling cycles.
func (p *PriorityQueue) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) {
	p.lock.Lock()
	p.nominatedPods.add(pod, nodeName)
	p.lock.Unlock()
}
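
// podsCompareBackoffCompleted is the less-function for podBackoffQ: it orders
// two QueuedPodInfos by their backoff expiry times.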
func (p *PriorityQueue) podsCompareBackoffCompleted(podInfo1, podInfo2 interface{}) bool {
	pInfo1 := podInfo1.(*framework.QueuedPodInfo)
	pInfo2 := podInfo2.(*framework.QueuedPodInfo)
	bo1 := p.getBackoffTime(pInfo1)
	bo2 := p.getBackoffTime(pInfo2)
	return bo1.Before(bo2)
}

// NumUnschedulablePods returns the number of unschedulable pods that exist in the SchedulingQueue.
func (p *PriorityQueue) NumUnschedulablePods() int {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return len(p.unschedulableQ.podInfoMap)
}

// newQueuedPodInfo builds a QueuedPodInfo object.
func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod) *framework.QueuedPodInfo {
	now := p.clock.Now()
	return &framework.QueuedPodInfo{
		Pod:                     pod,
		Timestamp:               now,
		InitialAttemptTimestamp: now,
	}
}

// getBackoffTime returns the time when podInfo completes backoff.
func (p *PriorityQueue) getBackoffTime(podInfo *framework.QueuedPodInfo) time.Time {
	duration := p.calculateBackoffDuration(podInfo)
	backoffTime := podInfo.Timestamp.Add(duration)
	return backoffTime
}

// calculateBackoffDuration is a helper function for calculating the backoff duration
// based on the number of attempts the pod has made. The duration starts at
// podInitialBackoffDuration, doubles with each attempt, and is capped at
// podMaxBackoffDuration.
func (p *PriorityQueue) calculateBackoffDuration(podInfo *framework.QueuedPodInfo) time.Duration {
	duration := p.podInitialBackoffDuration
	for i := 1; i < podInfo.Attempts; i++ {
		duration = duration * 2
		if duration > p.podMaxBackoffDuration {
			return p.podMaxBackoffDuration
		}
	}
	return duration
}
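
// updatePod replaces the Pod in the given QueuedPodInfo with newPod and
// returns the updated QueuedPodInfo.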
func updatePod(oldPodInfo interface{}, newPod *v1.Pod) *framework.QueuedPodInfo {
	pInfo := oldPodInfo.(*framework.QueuedPodInfo)
	pInfo.Pod = newPod
	return pInfo
}

// UnschedulablePodsMap holds pods that cannot be scheduled. This data structure
// is used to implement unschedulableQ.
type UnschedulablePodsMap struct {
	// podInfoMap is a map keyed by a pod's full name, with the value being a pointer to the QueuedPodInfo.
	podInfoMap map[string]*framework.QueuedPodInfo
	keyFunc    func(*v1.Pod) string
	// metricRecorder updates the counter when elements of an unschedulablePodsMap
	// get added or removed, and it does nothing if it's nil.
	metricRecorder metrics.MetricRecorder
}

// addOrUpdate adds a pod to the unschedulable podInfoMap, updating the entry if it already exists.
func (u *UnschedulablePodsMap) addOrUpdate(pInfo *framework.QueuedPodInfo) {
	podID := u.keyFunc(pInfo.Pod)
	if _, exists := u.podInfoMap[podID]; !exists && u.metricRecorder != nil {
		u.metricRecorder.Inc()
	}
	u.podInfoMap[podID] = pInfo
}

// delete deletes a pod from the unschedulable podInfoMap.
func (u *UnschedulablePodsMap) delete(pod *v1.Pod) {
	podID := u.keyFunc(pod)
	if _, exists := u.podInfoMap[podID]; exists && u.metricRecorder != nil {
		u.metricRecorder.Dec()
	}
	delete(u.podInfoMap, podID)
}

// get returns the QueuedPodInfo if a pod with the same key as the key of the given "pod"
// is found in the map. It returns nil otherwise.
func (u *UnschedulablePodsMap) get(pod *v1.Pod) *framework.QueuedPodInfo {
	podKey := u.keyFunc(pod)
	if pInfo, exists := u.podInfoMap[podKey]; exists {
		return pInfo
	}
	return nil
}

// clear removes all the entries from the unschedulable podInfoMap.
func (u *UnschedulablePodsMap) clear() {
	u.podInfoMap = make(map[string]*framework.QueuedPodInfo)
	if u.metricRecorder != nil {
		u.metricRecorder.Clear()
	}
}

// newUnschedulablePodsMap initializes a new object of UnschedulablePodsMap.
func newUnschedulablePodsMap(metricRecorder metrics.MetricRecorder) *UnschedulablePodsMap {
	return &UnschedulablePodsMap{
		podInfoMap:     make(map[string]*framework.QueuedPodInfo),
		keyFunc:        util.GetPodFullName,
		metricRecorder: metricRecorder,
	}
}

// nominatedPodMap is a structure that stores pods nominated to run on nodes.
// It exists because the nominatedNodeName of pod objects stored in the structure
// may be different from what the scheduler has here. We should be able to find pods
// by their UID and update/delete them.
type nominatedPodMap struct {
	// nominatedPods is a map keyed by a node name and the value is a list of
	// pods which are nominated to run on the node. These are pods which can be in
	// the activeQ or unschedulableQ.
	nominatedPods map[string][]*v1.Pod
	// nominatedPodToNode is a map keyed by a Pod UID to the node name where it is
	// nominated.
	nominatedPodToNode map[ktypes.UID]string
}
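
// add inserts the pod into the nominated pod map, keyed by nodeName or, when
// nodeName is empty, by the pod's status.nominatedNodeName.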
func (npm *nominatedPodMap) add(p *v1.Pod, nodeName string) {
	// Always delete the pod if it already exists, to ensure we never store more than
	// one instance of the pod.
	npm.delete(p)

	nnn := nodeName
	if len(nnn) == 0 {
		nnn = NominatedNodeName(p)
		if len(nnn) == 0 {
			return
		}
	}
	npm.nominatedPodToNode[p.UID] = nnn
	for _, np := range npm.nominatedPods[nnn] {
		if np.UID == p.UID {
			klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", p.Namespace, p.Name)
			return
		}
	}
	npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn], p)
}
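
// delete removes the pod from nominatedPods and nominatedPodToNode, dropping
// a node's entry entirely once its list of nominated pods becomes empty.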
func (npm *nominatedPodMap) delete(p *v1.Pod) {
	nnn, ok := npm.nominatedPodToNode[p.UID]
	if !ok {
		return
	}
	for i, np := range npm.nominatedPods[nnn] {
		if np.UID == p.UID {
			npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...)
			if len(npm.nominatedPods[nnn]) == 0 {
				delete(npm.nominatedPods, nnn)
			}
			break
		}
	}
	delete(npm.nominatedPodToNode, p.UID)
}
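
// update replaces oldPod with newPod in the nominated pod map.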
func (npm *nominatedPodMap) update(oldPod, newPod *v1.Pod) {
	// In some cases, an Update event with no "NominatedNode" present is received right
	// after a node ("NominatedNode") is reserved for this pod in memory.
	// In this case, we need to keep reserving the NominatedNode when updating the pod pointer.
	nodeName := ""
	// We won't fall into the below `if` block if the Update event represents:
	// (1) NominatedNode info is added
	// (2) NominatedNode info is updated
	// (3) NominatedNode info is removed
	if NominatedNodeName(oldPod) == "" && NominatedNodeName(newPod) == "" {
		if nnn, ok := npm.nominatedPodToNode[oldPod.UID]; ok {
			// This is the only case we should continue reserving the NominatedNode.
			nodeName = nnn
		}
	}
	// We update irrespective of whether the nominatedNodeName changed, to ensure
	// that the pod pointer is updated.
	npm.delete(oldPod)
	npm.add(newPod, nodeName)
}
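
// podsForNode returns the list of pods currently nominated to run on the
// given node, or nil if there are none.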
func (npm *nominatedPodMap) podsForNode(nodeName string) []*v1.Pod {
	if list, ok := npm.nominatedPods[nodeName]; ok {
		return list
	}
	return nil
}
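
// newNominatedPodMap initializes an empty nominatedPodMap.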
func newNominatedPodMap() *nominatedPodMap {
	return &nominatedPodMap{
		nominatedPods:      make(map[string][]*v1.Pod),
		nominatedPodToNode: make(map[ktypes.UID]string),
	}
}

// MakeNextPodFunc returns a function to retrieve the next pod from a given
// scheduling queue.
func MakeNextPodFunc(queue SchedulingQueue) func() *framework.QueuedPodInfo {
	return func() *framework.QueuedPodInfo {
		podInfo, err := queue.Pop()
		if err == nil {
			klog.V(4).Infof("About to try and schedule pod %v/%v", podInfo.Pod.Namespace, podInfo.Pod.Name)
			return podInfo
		}
		klog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
		return nil
	}
}
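
// podInfoKeyFunc is the key function used by the heaps: it returns the
// namespace/name key of the pod wrapped by a QueuedPodInfo.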
func podInfoKeyFunc(obj interface{}) (string, error) {
	return cache.MetaNamespaceKeyFunc(obj.(*framework.QueuedPodInfo).Pod)
}