Deployment: Use ControllerRef to list controlled objects.
Although Deployment already applied its ControllerRef to adopt matching ReplicaSets, it still used label selectors to list the objects it controls. That meant it didn't fully follow the rules of ControllerRef, so it could still fight with other controller types. With this change, each Deployment only sees objects it owns (via ControllerRef), so the special handling for overlapping Deployments should no longer be necessary.
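In ControllerRef terms, "objects that it controls" means objects whose controlling OwnerReference points back at the Deployment. A minimal sketch of that filter, assuming the same extensions types used in the diff below (an illustration, not the upstream helper):

// Illustration only: keep the ReplicaSets whose controlling OwnerReference
// (Controller: true) carries this Deployment's UID.
func ownedReplicaSets(d *extensions.Deployment, all []*extensions.ReplicaSet) []*extensions.ReplicaSet {
    var owned []*extensions.ReplicaSet
    for _, rs := range all {
        for _, ref := range rs.OwnerReferences {
            if ref.Controller != nil && *ref.Controller && ref.UID == d.UID {
                owned = append(owned, rs)
                break
            }
        }
    }
    return owned
}

The diff below threads a pre-filtered rsList and a podMap (Pods grouped by owning ReplicaSet UID) through the sync helpers instead of re-listing by label selector.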
@@ -25,7 +25,7 @@ import (
     "github.com/golang/glog"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/types"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
@@ -36,31 +36,31 @@ import (
 )

 // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions.
-func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
+func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
     if err != nil {
         return err
     }

     allRSs := append(oldRSs, newRS)
-    return dc.syncDeploymentStatus(allRSs, newRS, deployment)
+    return dc.syncDeploymentStatus(allRSs, newRS, d)
 }

 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
-func (dc *DeploymentController) sync(deployment *extensions.Deployment) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
+func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
     if err != nil {
         return err
     }
-    if err := dc.scale(deployment, newRS, oldRSs); err != nil {
+    if err := dc.scale(d, newRS, oldRSs); err != nil {
         // If we get an error while trying to scale, the deployment will be requeued
         // so we can abort this resync
         return err
     }

     allRSs := append(oldRSs, newRS)
-    return dc.syncDeploymentStatus(allRSs, newRS, deployment)
+    return dc.syncDeploymentStatus(allRSs, newRS, d)
 }

 // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
@@ -98,25 +98,31 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
 }

 // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated.
 //
+// rsList should come from getReplicaSetsForDeployment(d).
+// podMap should come from getPodMapForReplicaSets(rsList).
+// These are passed around to avoid repeating expensive API calls.
+//
 // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
 // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
 // only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
 // 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
 // This may lead to stale reads of replica sets, thus incorrect deployment status.
-func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *extensions.Deployment, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
+func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
     // List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods
-    rsList, podList, err := dc.rsAndPodsWithHashKeySynced(deployment)
+    rsList, podList, err := dc.rsAndPodsWithHashKeySynced(d, rsList, podMap)
     if err != nil {
         return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err)
     }
-    _, allOldRSs, err := deploymentutil.FindOldReplicaSets(deployment, rsList, podList)
+    _, allOldRSs, err := deploymentutil.FindOldReplicaSets(d, rsList, podList)
     if err != nil {
         return nil, nil, err
     }

     // Get new replica set with the updated revision number
-    newRS, err := dc.getNewReplicaSet(deployment, rsList, allOldRSs, createIfNotExisted)
+    newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted)
     if err != nil {
         return nil, nil, err
     }
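Step 1 of the revision bookkeeping above reduces to scanning the old RSes' revision annotations. A sketch under the assumption that revisions live in the standard `deployment.kubernetes.io/revision` annotation (strconv from the standard library; not the deploymentutil implementation):

// Sketch only: next revision is maxOldV+1, where maxOldV is the largest
// revision annotation among the old ReplicaSets (unparseable values skipped).
func nextRevision(oldRSs []*extensions.ReplicaSet) int64 {
    var maxOldV int64
    for _, rs := range oldRSs {
        if v, err := strconv.ParseInt(rs.Annotations["deployment.kubernetes.io/revision"], 10, 64); err == nil && v > maxOldV {
            maxOldV = v
        }
    }
    return maxOldV + 1
}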
@@ -124,33 +130,28 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext
     return newRS, allOldRSs, nil
 }

-// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced.
-func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
-    rsList, err := deploymentutil.ListReplicaSets(deployment,
-        func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) {
-            parsed, err := labels.Parse(options.LabelSelector)
-            if err != nil {
-                return nil, err
-            }
-            return dc.rsLister.ReplicaSets(namespace).List(parsed)
-        })
-    if err != nil {
-        return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err)
-    }
+// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment
+// targets, with pod-template-hash information synced.
+//
+// rsList should come from getReplicaSetsForDeployment(d).
+// podMap should come from getPodMapForReplicaSets(rsList).
+// These are passed around to avoid repeating expensive API calls.
+func (dc *DeploymentController) rsAndPodsWithHashKeySynced(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) ([]*extensions.ReplicaSet, *v1.PodList, error) {
     syncedRSList := []*extensions.ReplicaSet{}
     for _, rs := range rsList {
         // Add pod-template-hash information if it's not in the RS.
         // Otherwise, new RS produced by Deployment will overlap with pre-existing ones
         // that aren't constrained by the pod-template-hash.
-        syncedRS, err := dc.addHashKeyToRSAndPods(rs)
+        syncedRS, err := dc.addHashKeyToRSAndPods(rs, podMap[rs.UID])
         if err != nil {
             return nil, nil, err
         }
         syncedRSList = append(syncedRSList, syncedRS)
     }
-    syncedPodList, err := dc.listPods(deployment)
-    if err != nil {
-        return nil, nil, err
+    // Put all Pods from podMap into one list.
+    syncedPodList := &v1.PodList{}
+    for _, podList := range podMap {
+        syncedPodList.Items = append(syncedPodList.Items, podList.Items...)
     }
     return syncedRSList, syncedPodList, nil
 }
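The podMap parameter used throughout is a map from ReplicaSet UID to the Pods that RS controls. A hypothetical reconstruction of how such a map could be built (the commit's actual getPodMapForReplicaSets is outside this file's diff):

// Assumed shape of getPodMapForReplicaSets: every RS gets an entry (possibly
// empty), and each Pod is filed under its controlling owner's UID.
func buildPodMap(rsList []*extensions.ReplicaSet, pods []*v1.Pod) map[types.UID]*v1.PodList {
    podMap := make(map[types.UID]*v1.PodList, len(rsList))
    for _, rs := range rsList {
        podMap[rs.UID] = &v1.PodList{}
    }
    for _, pod := range pods {
        for _, ref := range pod.OwnerReferences {
            if ref.Controller == nil || !*ref.Controller {
                continue
            }
            if list, ok := podMap[ref.UID]; ok {
                list.Items = append(list.Items, *pod)
            }
            break // a Pod has at most one controlling owner
        }
    }
    return podMap
}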
@@ -159,7 +160,7 @@ func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extension
 // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created
 // 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas
 // 3. Add hash label to the rs's label and selector
-func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) (*extensions.ReplicaSet, error) {
+func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, podList *v1.PodList) (*extensions.ReplicaSet, error) {
     // If the rs already has the new hash label in its selector, it's done syncing
     if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
         return rs, nil
@@ -189,24 +190,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
     }

     // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted.
-    selector, err := metav1.LabelSelectorAsSelector(updatedRS.Spec.Selector)
-    if err != nil {
-        return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
-    }
-    options := metav1.ListOptions{LabelSelector: selector.String()}
-    parsed, err := labels.Parse(options.LabelSelector)
-    if err != nil {
-        return nil, err
-    }
-    pods, err := dc.podLister.Pods(updatedRS.Namespace).List(parsed)
-    if err != nil {
-        return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", rs.Namespace, options, err)
-    }
-    podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
-    for i := range pods {
-        podList.Items = append(podList.Items, *pods[i])
-    }
-    if err := deploymentutil.LabelPodsWithHash(&podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil {
+    if err := deploymentutil.LabelPodsWithHash(podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil {
         return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err)
     }

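With podMap in hand, addHashKeyToRSAndPods no longer re-lists Pods by selector; the Pods for each RS arrive as an argument. What "label pods with the hash" boils down to, as a local illustration only (the real deploymentutil.LabelPodsWithHash also persists each update through the API client):

// Illustration only: stamp the pod-template-hash label on each pod so the
// tightened RS selector keeps matching the pods it already owns.
func stampHash(podList *v1.PodList, hash string) {
    for i := range podList.Items {
        pod := &podList.Items[i]
        if pod.Labels == nil {
            pod.Labels = make(map[string]string)
        }
        pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] = hash
    }
}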
@@ -242,22 +226,6 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
     return updatedRS, nil
 }

-func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
-    return deploymentutil.ListPods(deployment,
-        func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-            parsed, err := labels.Parse(options.LabelSelector)
-            if err != nil {
-                return nil, err
-            }
-            pods, err := dc.podLister.Pods(namespace).List(parsed)
-            result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
-            for i := range pods {
-                result.Items = append(result.Items, *pods[i])
-            }
-            return &result, err
-        })
-}
-
 // Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet.
 // 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
 // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
@@ -329,25 +297,17 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
     newRS := extensions.ReplicaSet{
         ObjectMeta: metav1.ObjectMeta{
             // Make the name deterministic, to ensure idempotence
-            Name:      deployment.Name + "-" + podTemplateSpecHash,
-            Namespace: namespace,
+            Name:            deployment.Name + "-" + podTemplateSpecHash,
+            Namespace:       namespace,
+            OwnerReferences: []metav1.OwnerReference{*newControllerRef(deployment)},
         },
         Spec: extensions.ReplicaSetSpec{
-            Replicas:        func(i int32) *int32 { return &i }(0),
+            Replicas:        new(int32),
             MinReadySeconds: deployment.Spec.MinReadySeconds,
             Selector:        newRSSelector,
             Template:        newRSTemplate,
         },
     }
-    var trueVar = true
-    controllerRef := &metav1.OwnerReference{
-        APIVersion: getDeploymentKind().GroupVersion().String(),
-        Kind:       getDeploymentKind().Kind,
-        Name:       deployment.Name,
-        UID:        deployment.UID,
-        Controller: &trueVar,
-    }
-    newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef)
     allRSs := append(oldRSs, &newRS)
     newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS)
     if err != nil {
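One incidental cleanup in this hunk: `Replicas: new(int32)` replaces the immediately-invoked closure. Both yield a pointer to an int32 zero value, as this small runnable check shows:

package main

import "fmt"

func main() {
    // new(int32) allocates a zeroed int32 and returns its address,
    // matching the old func(i int32) *int32 { return &i }(0) idiom.
    a := new(int32)
    b := func(i int32) *int32 { return &i }(0)
    fmt.Println(*a == *b) // true
}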
@@ -632,8 +592,12 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS

 // isScalingEvent checks whether the provided deployment has been updated with a scaling event
 // by looking at the desired-replicas annotation in the active replica sets of the deployment.
-func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool, error) {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false)
+//
+// rsList should come from getReplicaSetsForDeployment(d).
+// podMap should come from getPodMapForReplicaSets(rsList).
+// These are passed around to avoid repeating expensive API calls.
+func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
     if err != nil {
         return false, err
     }
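The check behind this doc comment compares the desired-replicas annotation on active ReplicaSets against the Deployment's current replica count. A hedged sketch of that comparison (annotation key assumed; the real logic lives in deploymentutil):

// Sketch only: a scaling event occurred if some active RS recorded a
// desired-replicas annotation that differs from the deployment's spec.
func scaledSinceLastSync(d *extensions.Deployment, activeRSs []*extensions.ReplicaSet) bool {
    for _, rs := range activeRSs {
        v, err := strconv.ParseInt(rs.Annotations["deployment.kubernetes.io/desired-replicas"], 10, 32)
        if err != nil {
            continue
        }
        if d.Spec.Replicas != nil && int32(v) != *d.Spec.Replicas {
            return true
        }
    }
    return false
}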
@@ -649,3 +613,15 @@ func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool,
     }
     return false, nil
 }
+
+// newControllerRef returns a ControllerRef pointing to the deployment.
+func newControllerRef(d *extensions.Deployment) *metav1.OwnerReference {
+    isController := true
+    return &metav1.OwnerReference{
+        APIVersion: controllerKind.GroupVersion().String(),
+        Kind:       controllerKind.Kind,
+        Name:       d.Name,
+        UID:        d.UID,
+        Controller: &isController,
+    }
+}
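controllerKind is defined outside this diff; presumably it is the Deployment GroupVersionKind, along these lines (an assumption for illustration, not part of this commit):

// Assumed definition (lives elsewhere in the package): the GroupVersionKind
// that newControllerRef stamps into APIVersion and Kind.
var controllerKind = extensions.SchemeGroupVersion.WithKind("Deployment")

Centralizing the kind in one package-level variable also explains the removal of the repeated getDeploymentKind() calls in the getNewReplicaSet hunk above.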