daemonset: Implement MaxSurge on daemonset update
If MaxSurge is set, the controller will place a second, updated pod on nodes — up to the allowed surge limit — and then, once the newest (by hash) pod on a node is ready, trigger deletion of the old pod. If the old pod becomes unready before the new pod is ready, the old pod is deleted immediately. If an old pod becomes unready before a new pod has been placed on that node, a replacement pod is created immediately for that node, even beyond the MaxSurge limit. The backoff clock is used consistently throughout the daemonset controller as an injectable clock for testing purposes.
This commit is contained in:
@@ -26,12 +26,11 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
@@ -46,37 +45,146 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
}
|
||||
|
||||
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
|
||||
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
|
||||
maxSurge, maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get unavailable numbers: %v", err)
|
||||
}
|
||||
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
|
||||
|
||||
// for oldPods delete all not running pods
|
||||
now := dsc.failedPodsBackoff.Clock.Now()
|
||||
|
||||
// When not surging, we delete just enough pods to stay under the maxUnavailable limit, if any
|
||||
// are necessary, and let the core loop create new instances on those nodes.
|
||||
if maxSurge == 0 {
|
||||
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
|
||||
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods, now)
|
||||
|
||||
var oldPodsToDelete []string
|
||||
klog.V(4).Infof("Marking all unavailable old pods for deletion")
|
||||
for _, pod := range oldUnavailablePods {
|
||||
// Skip terminating pods. We won't delete them again
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
}
|
||||
for _, pod := range oldAvailablePods {
|
||||
if numUnavailable >= maxUnavailable {
|
||||
klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
|
||||
break
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
numUnavailable++
|
||||
}
|
||||
return dsc.syncNodes(ds, oldPodsToDelete, nil, hash)
|
||||
}
|
||||
|
||||
// When surging, we create new pods whenever an old pod is unavailable, and we can create up
|
||||
// to maxSurge extra pods
|
||||
//
|
||||
// Assumptions:
|
||||
// * Expect manage loop to allow no more than two pods per node, one old, one new
|
||||
// * Expect manage loop will create new pods if there are no pods on node
|
||||
// * Expect manage loop will handle failed pods
|
||||
// * Deleted pods do not count as unavailable so that updates make progress when nodes are down
|
||||
// Invariants:
|
||||
// * A node with an unavailable old pod is a candidate for immediate new pod creation
|
||||
// * An old available pod is deleted if a new pod is available
|
||||
// * No more than maxSurge new pods are created for old available pods at any one time
|
||||
//
|
||||
var oldPodsToDelete []string
|
||||
klog.V(4).Infof("Marking all unavailable old pods for deletion")
|
||||
for _, pod := range oldUnavailablePods {
|
||||
// Skip terminating pods. We won't delete them again
|
||||
var candidateNewNodes []string
|
||||
var allowedNewNodes []string
|
||||
var numSurge int
|
||||
|
||||
for nodeName, pods := range nodeToDaemonPods {
|
||||
newPod, oldPod, ok := findSurgePodsOnNode(ds, pods, hash)
|
||||
if !ok {
|
||||
// let the manage loop clean up this node, and treat it as a surge node
|
||||
klog.V(3).Infof("DaemonSet %s/%s has excess pods on node %s, skipping to allow the core loop to process", ds.Namespace, ds.Name, nodeName)
|
||||
numSurge++
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case oldPod == nil:
|
||||
// we don't need to do anything to this node, the manage loop will handle it
|
||||
case newPod == nil:
|
||||
// this is a surge candidate
|
||||
switch {
|
||||
case !podutil.IsPodAvailable(oldPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}):
|
||||
// the old pod isn't available, allow it to become a replacement
|
||||
klog.V(5).Infof("Pod %s on node %s is out of date and not available, allowing replacement", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
// record the replacement
|
||||
if allowedNewNodes == nil {
|
||||
allowedNewNodes = make([]string, 0, len(nodeList))
|
||||
}
|
||||
allowedNewNodes = append(allowedNewNodes, nodeName)
|
||||
case numSurge >= maxSurge:
|
||||
// no point considering any other candidates
|
||||
continue
|
||||
default:
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is out of date, this is a surge candidate", ds.Namespace, ds.Name, oldPod.Name, nodeName)
|
||||
// record the candidate
|
||||
if candidateNewNodes == nil {
|
||||
candidateNewNodes = make([]string, 0, maxSurge)
|
||||
}
|
||||
candidateNewNodes = append(candidateNewNodes, nodeName)
|
||||
}
|
||||
default:
|
||||
// we have already surged onto this node, determine our state
|
||||
if !podutil.IsPodAvailable(newPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) {
|
||||
// we're waiting to go available here
|
||||
numSurge++
|
||||
continue
|
||||
}
|
||||
// we're available, delete the old pod
|
||||
klog.V(5).Infof("DaemonSet %s/%s pod %s on node %s is available, remove %s", ds.Namespace, ds.Name, newPod.Name, nodeName, oldPod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, oldPod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// use any of the candidates we can, including the allowedNewNodes
|
||||
klog.V(5).Infof("DaemonSet %s/%s allowing %d replacements, surge up to %d, %d are in progress, %d candidates", ds.Namespace, ds.Name, len(allowedNewNodes), maxSurge, numSurge, len(candidateNewNodes))
|
||||
remainingSurge := maxSurge - numSurge
|
||||
if remainingSurge < 0 {
|
||||
remainingSurge = 0
|
||||
}
|
||||
if max := len(candidateNewNodes); remainingSurge > max {
|
||||
remainingSurge = max
|
||||
}
|
||||
newNodesToCreate := append(allowedNewNodes, candidateNewNodes[:remainingSurge]...)
|
||||
|
||||
return dsc.syncNodes(ds, oldPodsToDelete, newNodesToCreate, hash)
|
||||
}
|
||||
|
||||
// findSurgePodsOnNode looks at non-deleted pods on a given node and returns true if there
|
||||
// is at most one of each old and new pods, or false if there are multiples. We can skip
|
||||
// processing the particular node in those scenarios and let the manage loop prune the
|
||||
// excess pods for our next time around.
|
||||
func findSurgePodsOnNode(ds *apps.DaemonSet, podsOnNode []*v1.Pod, hash string) (newPod, oldPod *v1.Pod, ok bool) {
|
||||
for _, pod := range podsOnNode {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Marking old pods for deletion")
|
||||
for _, pod := range oldAvailablePods {
|
||||
if numUnavailable >= maxUnavailable {
|
||||
klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
|
||||
break
|
||||
generation, err := util.GetTemplateGeneration(ds)
|
||||
if err != nil {
|
||||
generation = nil
|
||||
}
|
||||
if util.IsPodUpdated(pod, hash, generation) {
|
||||
if newPod != nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
newPod = pod
|
||||
} else {
|
||||
if oldPod != nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
oldPod = pod
|
||||
}
|
||||
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
|
||||
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
|
||||
numUnavailable++
|
||||
}
|
||||
return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
|
||||
return newPod, oldPod, true
|
||||
}
|
||||
|
||||
// constructHistory finds all histories controlled by the given DaemonSet, and
|
||||
@@ -385,14 +493,18 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToD
|
||||
return newPods, oldPods
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
|
||||
// getUnavailableNumbers calculates the true number of allowed unavailable or surge pods.
|
||||
// TODO: This method duplicates calculations in the main update loop and should be refactored
|
||||
// to remove the need to calculate availability twice (once here, and once in the main loops)
|
||||
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, int, error) {
|
||||
klog.V(4).Infof("Getting unavailable numbers")
|
||||
now := dsc.failedPodsBackoff.Clock.Now()
|
||||
var numUnavailable, desiredNumberScheduled int
|
||||
for i := range nodeList {
|
||||
node := nodeList[i]
|
||||
wantToRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
return -1, -1, -1, err
|
||||
}
|
||||
if !wantToRun {
|
||||
continue
|
||||
@@ -405,8 +517,8 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeL
|
||||
}
|
||||
available := false
|
||||
for _, pod := range daemonPods {
|
||||
//for the purposes of update we ensure that the Pod is both available and not terminating
|
||||
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) && pod.DeletionTimestamp == nil {
|
||||
// for the purposes of update we ensure that the Pod is both available and not terminating
|
||||
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) && pod.DeletionTimestamp == nil {
|
||||
available = true
|
||||
break
|
||||
}
|
||||
@@ -415,12 +527,25 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeL
|
||||
numUnavailable++
|
||||
}
|
||||
}
|
||||
maxUnavailable, err := intstrutil.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
|
||||
|
||||
maxUnavailable, err := util.UnavailableCount(ds, desiredNumberScheduled)
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
|
||||
return -1, -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
|
||||
}
|
||||
klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
|
||||
return maxUnavailable, numUnavailable, nil
|
||||
|
||||
maxSurge, err := util.SurgeCount(ds, desiredNumberScheduled)
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid value for MaxSurge: %v", err)
|
||||
}
|
||||
|
||||
// if the daemonset returned with an impossible configuration, obey the default of unavailable=1 (in the
|
||||
// event the apiserver returns 0 for both surge and unavailability)
|
||||
if desiredNumberScheduled > 0 && maxUnavailable == 0 && maxSurge == 0 {
|
||||
klog.Warningf("DaemonSet %s/%s is not configured for surge or unavailability, defaulting to accepting unavailability", ds.Namespace, ds.Name)
|
||||
maxUnavailable = 1
|
||||
}
|
||||
klog.V(0).Infof("DaemonSet %s/%s, maxSurge: %d, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxSurge, maxUnavailable, numUnavailable)
|
||||
return maxSurge, maxUnavailable, numUnavailable, nil
|
||||
}
|
||||
|
||||
type historiesByRevision []*apps.ControllerRevision
|
||||
|
Reference in New Issue
Block a user