kubectl: scale down based on ready during rolling updates

Michail Kargakis
2016-02-15 14:43:36 +01:00
parent 1a2f811a3b
commit 35fab99af7
2 changed files with 62 additions and 45 deletions


@@ -113,10 +113,8 @@ type RollingUpdater struct {
 	getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error)
 	// cleanup performs post deployment cleanup tasks for newRc and oldRc.
 	cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error
-	// waitForReadyPods should block until there are >0 total pods ready amongst
-	// the old and new controllers, and should return the amount of old and new
-	// ready.
-	waitForReadyPods func(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error)
+	// getReadyPods returns the amount of old and new ready pods.
+	getReadyPods func(oldRc, newRc *api.ReplicationController) (int, int, error)
 }
 
 // NewRollingUpdater creates a RollingUpdater from a client.
@@ -128,7 +126,7 @@ func NewRollingUpdater(namespace string, client client.Interface) *RollingUpdater {
 	// Inject real implementations.
 	updater.scaleAndWait = updater.scaleAndWaitWithScaler
 	updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient
-	updater.waitForReadyPods = updater.pollForReadyPods
+	updater.getReadyPods = updater.readyPods
 	updater.cleanup = updater.cleanupWithClients
 	return updater
 }
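Because the updater's collaborators are plain function fields, swapping the real readyPods for a stub is trivial in tests. Below is a minimal, self-contained sketch of that injection pattern; the rc and updater types are illustrative stand-ins, not the real kubectl types.

package main

import "fmt"

// rc is a tiny stand-in for *api.ReplicationController, just enough to
// demonstrate the hook signature.
type rc struct{ replicas int }

// updater mirrors RollingUpdater's style: behavior lives in function
// fields, so a test can inject fakes instead of talking to a cluster.
type updater struct {
	getReadyPods func(oldRc, newRc *rc) (int, int, error)
}

func main() {
	u := &updater{
		// Test stub: pretend 1 old pod and 2 new pods are ready.
		getReadyPods: func(oldRc, newRc *rc) (int, int, error) {
			return 1, 2, nil
		},
	}
	oldReady, newReady, _ := u.getReadyPods(&rc{replicas: 3}, &rc{replicas: 2})
	fmt.Println(oldReady, newReady) // prints: 1 2
}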
@@ -299,7 +297,7 @@ func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, origin
 	return scaledRc, nil
 }
 
-// scaleDown scales down oldRc to 0 at whatever increment possible given the
+// scaleDown scales down oldRc to 0 at whatever decrement possible given the
 // thresholds defined on the config. scaleDown will safely no-op as necessary
 // when it detects redundancy or other relevant conditions.
 func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
@@ -307,15 +305,19 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 	if oldRc.Spec.Replicas == 0 {
 		return oldRc, nil
 	}
-	// Block until there are any pods ready.
-	_, newAvailable, err := r.waitForReadyPods(config.Interval, config.Timeout, oldRc, newRc)
+	// Get ready pods. We shouldn't block, otherwise in case both old and new
+	// pods are unavailable then the rolling update process blocks.
+	// Timeout-wise we are already covered by the progress check.
+	_, newAvailable, err := r.getReadyPods(oldRc, newRc)
 	if err != nil {
 		return nil, err
 	}
+	// The old controller is considered as part of the total because we want to
+	// maintain minimum availability even with a volatile old controller.
 	// Scale down as much as possible while maintaining minimum availability
-	decrement := oldRc.Spec.Replicas + newAvailable - minAvailable
+	allPods := oldRc.Spec.Replicas + newRc.Spec.Replicas
+	newUnavailable := newRc.Spec.Replicas - newAvailable
+	decrement := allPods - minAvailable - newUnavailable
 	// The decrement normally shouldn't drop below 0 because the available count
 	// always starts below the old replica count, but the old replica count can
 	// decrement due to externalities like pods death in the replica set. This
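A quick numeric check of the new decrement arithmetic (values are illustrative only). Note that allPods - minAvailable - newUnavailable expands to oldRc.Spec.Replicas + newAvailable - minAvailable, the same quantity as the removed line; the behavioral change in this commit is that getReadyPods returns immediately instead of polling, so a zero decrement becomes a safe no-op rather than a block.

package main

import "fmt"

func main() {
	// Illustrative values: the old controller still runs 3 replicas, the new
	// controller runs 2 of which only 1 is ready, and the update must keep
	// at least 4 pods available.
	oldReplicas, newReplicas, newAvailable, minAvailable := 3, 2, 1, 4

	allPods := oldReplicas + newReplicas         // 5 pods exist in total
	newUnavailable := newReplicas - newAvailable // 1 new pod is not ready yet
	decrement := allPods - minAvailable - newUnavailable

	fmt.Println(decrement) // prints: 0, so scaleDown no-ops this round
}

As the surrounding comment notes, the real code also guards against the decrement going negative when old replicas disappear for external reasons.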
@@ -360,40 +362,34 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r
 	return r.c.ReplicationControllers(rc.Namespace).Get(rc.Name)
 }
 
-// pollForReadyPods polls oldRc and newRc each interval and returns the old
-// and new ready counts for their pods. If a pod is observed as being ready,
-// it's considered ready even if it later becomes notReady.
-func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error) {
+// readyPods returns the old and new ready counts for their pods.
+// If a pod is observed as being ready, it's considered ready even
+// if it later becomes notReady.
+func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int, int, error) {
 	controllers := []*api.ReplicationController{oldRc, newRc}
 	oldReady := 0
 	newReady := 0
-	err := wait.Poll(interval, timeout, func() (done bool, err error) {
-		anyReady := false
-		for _, controller := range controllers {
-			selector := labels.Set(controller.Spec.Selector).AsSelector()
-			options := api.ListOptions{LabelSelector: selector}
-			pods, err := r.c.Pods(controller.Namespace).List(options)
-			if err != nil {
-				return false, err
-			}
-			for _, pod := range pods.Items {
-				if api.IsPodReady(&pod) {
-					switch controller.Name {
-					case oldRc.Name:
-						oldReady++
-					case newRc.Name:
-						newReady++
-					}
-					anyReady = true
-				}
-			}
-		}
-		if anyReady {
-			return true, nil
-		}
-		return false, nil
-	})
-	return oldReady, newReady, err
-}
+	for i := range controllers {
+		controller := controllers[i]
+		selector := labels.Set(controller.Spec.Selector).AsSelector()
+		options := api.ListOptions{LabelSelector: selector}
+		pods, err := r.c.Pods(controller.Namespace).List(options)
+		if err != nil {
+			return 0, 0, err
+		}
+		for _, pod := range pods.Items {
+			if api.IsPodReady(&pod) {
+				switch controller.Name {
+				case oldRc.Name:
+					oldReady++
+				case newRc.Name:
+					newReady++
+				}
+			}
+		}
+	}
+	return oldReady, newReady, nil
+}
 
 // getOrCreateTargetControllerWithClient looks for an existing controller with
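For readers outside the kubectl tree, the counting logic above reduces to a single non-blocking pass per controller. Here is a self-contained sketch under stand-in types; the real code lists pods from the API server by each controller's label selector and checks api.IsPodReady.

package main

import "fmt"

// pod is a stand-in for api.Pod; owner marks which controller's label
// selector would have matched the pod.
type pod struct {
	owner string
	ready bool
}

// readyCounts mirrors readyPods: one pass over each controller's pods,
// counting those currently ready. No polling, no blocking.
func readyCounts(oldName, newName string, pods []pod) (oldReady, newReady int) {
	for _, p := range pods {
		if !p.ready {
			continue
		}
		switch p.owner {
		case oldName:
			oldReady++
		case newName:
			newReady++
		}
	}
	return oldReady, newReady
}

func main() {
	pods := []pod{
		{owner: "old", ready: true},
		{owner: "old", ready: false},
		{owner: "new", ready: true},
		{owner: "new", ready: true},
	}
	o, n := readyCounts("old", "new", pods)
	fmt.Println(o, n) // prints: 1 2
}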