Wait for clean old RSs statuses in the middle of Recreate rollouts

Michail Kargakis
2017-04-02 17:59:30 +02:00
parent 74c23bdf68
commit 97fed0aff4
7 changed files with 103 additions and 26 deletions


@@ -26,6 +26,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -537,10 +538,8 @@ func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment,
 		podMap[rs.UID] = &v1.PodList{}
 	}
 	for _, pod := range pods {
-		// Ignore inactive Pods since that's what ReplicaSet does.
-		if !controller.IsPodActive(pod) {
-			continue
-		}
+		// Do not ignore inactive Pods because Recreate Deployments need to verify that no
+		// Pods from older versions are running before spinning up new Pods.
 		controllerRef := controller.GetControllerOf(pod)
 		if controllerRef == nil {
 			continue
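
With the IsPodActive filter gone, the podMap built by getPodMapForDeployment now includes terminated and terminating Pods as well. A minimal sketch of how the Recreate path might consume that map to decide whether Pods from older versions are still around; the helper name, signature, and phase handling here are illustrative assumptions, not code shown in this diff:

// oldPodsRunning reports whether any Pod not owned by the new ReplicaSet is
// still in a non-terminal phase. Sketch only; assumes the surrounding file's
// imports (core v1 types, extensions) plus "k8s.io/apimachinery/pkg/types".
func oldPodsRunning(newRS *extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
	for rsUID, podList := range podMap {
		// Pods controlled by the new ReplicaSet are expected to run; skip them.
		if newRS != nil && newRS.UID == rsUID {
			continue
		}
		for _, pod := range podList.Items {
			switch pod.Status.Phase {
			case v1.PodFailed, v1.PodSucceeded:
				// Terminal Pods do not block the rollout.
				continue
			default:
				// A Pending, Running, or Unknown Pod from an older version means
				// the new ReplicaSet must not be scaled up yet.
				return true
			}
		}
	}
	return false
}

Keeping inactive Pods in the map is what lets a check like this still see Pods that are terminating but not yet gone.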
@@ -614,6 +613,10 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		return err
 	}
 	// List all Pods owned by this Deployment, grouped by their ReplicaSet.
+	// Current uses of the podMap are:
+	//
+	// * check if a Pod is labeled correctly with the pod-template-hash label.
+	// * check that no old Pods are running in the middle of Recreate Deployments.
 	podMap, err := dc.getPodMapForDeployment(d, rsList)
 	if err != nil {
 		return err
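
The first use listed in the comment above, checking the pod-template-hash label, could look roughly like the loop below. DefaultDeploymentUniqueLabelKey is the existing constant for that label, but the loop itself is only an illustrative sketch of the check, not the controller's actual labeling code:

// Sketch: confirm each Pod carries the pod-template-hash label of the
// ReplicaSet that owns it (illustrative only; assumes the file's imports,
// including glog).
for _, rs := range rsList {
	hash := rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]
	for _, pod := range podMap[rs.UID].Items {
		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
			// A Pod that predates hash labeling; the controller relabels such
			// Pods so that the ReplicaSet selector keeps matching them.
			glog.V(4).Infof("Pod %s/%s is missing the pod-template-hash %q", pod.Namespace, pod.Name, hash)
		}
	}
}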