ReplicaSet has owner ref of the Deployment that created it
parent 9ef610b672
commit 777977612b
@@ -23,6 +23,7 @@ import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/v1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime/schema"
@@ -143,3 +144,103 @@ func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
	}
	return err
}

// ReplicaSetControllerRefManager is used to manage the controllerRef of ReplicaSets.
// It defines three methods, Classify, AdoptReplicaSet, and ReleaseReplicaSet, which
// classify ReplicaSets into the appropriate categories and adopt or release them
// accordingly. See the comments on those functions for details.
type ReplicaSetControllerRefManager struct {
	rsControl          RSControlInterface
	controllerObject   v1.ObjectMeta
	controllerSelector labels.Selector
	controllerKind     schema.GroupVersionKind
}

// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
func NewReplicaSetControllerRefManager(
	rsControl RSControlInterface,
	controllerObject v1.ObjectMeta,
	controllerSelector labels.Selector,
	controllerKind schema.GroupVersionKind,
) *ReplicaSetControllerRefManager {
	return &ReplicaSetControllerRefManager{rsControl, controllerObject, controllerSelector, controllerKind}
}

// Classify sorts ReplicaSets into three categories:
// 1. matchesAndControlled are ReplicaSets whose labels match the selector of the
//    Deployment and that have a controllerRef pointing to the Deployment.
// 2. matchesNeedsController are ReplicaSets whose labels match the Deployment but
//    that have no controllerRef. (ReplicaSets with matching labels but with a
//    controllerRef pointing to another object are ignored.)
// 3. controlledDoesNotMatch are ReplicaSets that have a controllerRef pointing to
//    the Deployment but whose labels no longer match the selector.
func (m *ReplicaSetControllerRefManager) Classify(replicaSets []*extensions.ReplicaSet) (
	matchesAndControlled []*extensions.ReplicaSet,
	matchesNeedsController []*extensions.ReplicaSet,
	controlledDoesNotMatch []*extensions.ReplicaSet) {
	for i := range replicaSets {
		replicaSet := replicaSets[i]
		controllerRef := GetControllerOf(replicaSet.ObjectMeta)
		if controllerRef != nil {
			if controllerRef.UID != m.controllerObject.UID {
				// Ignore a ReplicaSet controlled by another Deployment.
				glog.V(4).Infof("Ignoring ReplicaSet %v/%v, it's owned by [%s/%s, name: %s, uid: %s]",
					replicaSet.Namespace, replicaSet.Name, controllerRef.APIVersion, controllerRef.Kind, controllerRef.Name, controllerRef.UID)
				continue
			}
			// Already controlled by this Deployment.
			if m.controllerSelector.Matches(labels.Set(replicaSet.Labels)) {
				matchesAndControlled = append(matchesAndControlled, replicaSet)
			} else {
				controlledDoesNotMatch = append(controlledDoesNotMatch, replicaSet)
			}
		} else {
			if !m.controllerSelector.Matches(labels.Set(replicaSet.Labels)) {
				continue
			}
			matchesNeedsController = append(matchesNeedsController, replicaSet)
		}
	}
	return matchesAndControlled, matchesNeedsController, controlledDoesNotMatch
}

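Classify leans on GetControllerOf, a helper defined elsewhere in pkg/controller and not shown in this diff. A minimal sketch of what it presumably does, for context:

```go
// Sketch only: the real helper lives in pkg/controller and may differ in detail.
// It returns the owner reference marked as the managing controller, if any.
func GetControllerOf(controllee v1.ObjectMeta) *metav1.OwnerReference {
	for i := range controllee.OwnerReferences {
		ref := &controllee.OwnerReferences[i]
		if ref.Controller != nil && *ref.Controller {
			return ref
		}
	}
	return nil
}
```
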
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(replicaSet *extensions.ReplicaSet) error {
	// We should not adopt any ReplicaSets if the Deployment is about to be deleted.
	if m.controllerObject.DeletionTimestamp != nil {
		return fmt.Errorf("cancel the adopt attempt for RS %s because the controller %v is being deleted",
			strings.Join([]string{replicaSet.Namespace, replicaSet.Name, string(replicaSet.UID)}, "_"), m.controllerObject.Name)
	}
	addControllerPatch := fmt.Sprintf(
		`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true}],"uid":"%s"}}`,
		m.controllerKind.GroupVersion(), m.controllerKind.Kind,
		m.controllerObject.Name, m.controllerObject.UID, replicaSet.UID)
	return m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(addControllerPatch))
}

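For a concrete sense of what AdoptReplicaSet sends: for a hypothetical Deployment named nginx adopting a ReplicaSet, the strategic merge patch above expands to roughly the following (all names and UIDs are illustrative):

```json
{
  "metadata": {
    "ownerReferences": [
      {
        "apiVersion": "extensions/v1beta1",
        "kind": "Deployment",
        "name": "nginx",
        "uid": "3c1a8b22-example-deployment-uid",
        "controller": true
      }
    ],
    "uid": "9f4e7d01-example-replicaset-uid"
  }
}
```

Note that the ReplicaSet's own uid is part of the patch: it acts as a precondition, so if the ReplicaSet was deleted and recreated under the same name, the server rejects the patch as Invalid instead of adopting the wrong object.
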
// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the
// Deployment controller. It returns the error if the patching fails. 404 and 422
// errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error {
	glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
		replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name)
	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, replicaSet.UID)
	err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch))
	if err != nil {
		if errors.IsNotFound(err) {
			// If the ReplicaSet no longer exists, ignore it.
			return nil
		}
		if errors.IsInvalid(err) {
			// An Invalid error is returned in two cases: 1. the ReplicaSet
			// has no owner reference; 2. the uid of the ReplicaSet doesn't
			// match, which means the ReplicaSet was deleted and then recreated.
			// In both cases the error can be ignored.
			return nil
		}
	}
	return err
}

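The release patch works the same way, using the strategic-merge `$patch: delete` directive to remove only the owner reference whose uid matches the Deployment; expanded, it looks roughly like this (UIDs illustrative):

```json
{
  "metadata": {
    "ownerReferences": [
      {
        "$patch": "delete",
        "uid": "3c1a8b22-example-deployment-uid"
      }
    ],
    "uid": "9f4e7d01-example-replicaset-uid"
  }
}
```

This is also why the function tolerates 422 (Invalid): a uid mismatch simply means there is nothing left to release.
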
@@ -361,6 +361,26 @@ const (
	SuccessfulDeletePodReason = "SuccessfulDelete"
)

// RSControlInterface is an interface that knows how to patch ReplicaSets. It is
// used by the deployment controller to ease testing of the actions it takes.
type RSControlInterface interface {
	PatchReplicaSet(namespace, name string, data []byte) error
}

// RealRSControl is the default implementation of RSControlInterface.
type RealRSControl struct {
	KubeClient clientset.Interface
	Recorder   record.EventRecorder
}

var _ RSControlInterface = &RealRSControl{}

func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
	_, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, api.StrategicMergePatchType, data)
	return err
}

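Because RSControlInterface is the seam the deployment controller goes through, unit tests can substitute a fake that records patches instead of talking to the API server. An illustrative double (FakeRSControl is not part of this commit):

```go
// FakeRSControl records PatchReplicaSet calls for later assertions.
// Illustrative only; not part of this commit.
type FakeRSControl struct {
	Patches [][]byte
}

var _ RSControlInterface = &FakeRSControl{}

func (f *FakeRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
	f.Patches = append(f.Patches, data)
	return nil
}
```
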
// PodControlInterface is an interface that knows how to add or delete pods,
// created as an interface to allow testing.
type PodControlInterface interface {

@@ -33,6 +33,7 @@ go_library(
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/informers:go_default_library",
        "//pkg/labels:go_default_library",
        "//pkg/runtime/schema:go_default_library",
        "//pkg/util/errors:go_default_library",
        "//pkg/util/integer:go_default_library",
        "//pkg/util/labels:go_default_library",

@@ -28,6 +28,7 @@ import (

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/v1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
@@ -39,6 +40,7 @@ import (
	"k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/controller/informers"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime/schema"
	utilerrors "k8s.io/kubernetes/pkg/util/errors"
	"k8s.io/kubernetes/pkg/util/metrics"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -58,9 +60,14 @@ const (
	MaxRetries = 5
)

func getDeploymentKind() schema.GroupVersionKind {
	return extensions.SchemeGroupVersion.WithKind("Deployment")
}

// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
type DeploymentController struct {
	rsControl     controller.RSControlInterface
	client        clientset.Interface
	eventRecorder record.EventRecorder

@@ -106,6 +113,10 @@ func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
		progressQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "progress-check"),
	}
	dc.rsControl = controller.RealRSControl{
		KubeClient: client,
		Recorder:   dc.eventRecorder,
	}

	dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: dc.addDeployment,
@@ -428,6 +439,48 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
	dc.queue.Forget(key)
}

// classifyReplicaSets uses a NewReplicaSetControllerRefManager to classify ReplicaSets
// and adopts them if their labels match the Deployment but they are missing the reference.
// It also removes the controllerRef from ReplicaSets whose labels no longer match
// the Deployment.
func (dc *DeploymentController) classifyReplicaSets(deployment *extensions.Deployment) error {
	rsList, err := dc.rsLister.ReplicaSets(deployment.Namespace).List(labels.Everything())
	if err != nil {
		return err
	}

	deploymentSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return fmt.Errorf("deployment %s/%s has invalid label selector: %v", deployment.Namespace, deployment.Name, err)
	}
	cm := controller.NewReplicaSetControllerRefManager(dc.rsControl, deployment.ObjectMeta, deploymentSelector, getDeploymentKind())
	matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(rsList)
	// Adopt ReplicaSets only if this Deployment is not going to be deleted.
	if deployment.DeletionTimestamp == nil {
		for _, replicaSet := range matchesNeedsController {
			err := cm.AdoptReplicaSet(replicaSet)
			// Continue to the next RS if adoption fails.
			if err != nil {
				// If the RS no longer exists, don't even log the error.
				if !errors.IsNotFound(err) {
					utilruntime.HandleError(err)
				}
			} else {
				matchesAndControlled = append(matchesAndControlled, replicaSet)
			}
		}
	}
	// Remove the controllerRef from the RSs that no longer have matching labels.
	var errlist []error
	for _, replicaSet := range controlledDoesNotMatch {
		err := cm.ReleaseReplicaSet(replicaSet)
		if err != nil {
			errlist = append(errlist, err)
		}
	}
	return utilerrors.NewAggregate(errlist)
}

// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
@@ -486,6 +539,11 @@ func (dc *DeploymentController) syncDeployment(key string) error {
		return dc.syncStatusOnly(d)
	}

	err = dc.classifyReplicaSets(deployment)
	if err != nil {
		return err
	}

	// Update deployment conditions with an Unknown condition when pausing/resuming
	// a deployment. In this way, we can be sure that we won't timeout when a user
	// resumes a Deployment with a set progressDeadlineSeconds.

@@ -335,6 +335,15 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
			Template: newRSTemplate,
		},
	}
	var trueVar = true
	controllerRef := &metav1.OwnerReference{
		APIVersion: getDeploymentKind().GroupVersion().String(),
		Kind:       getDeploymentKind().Kind,
		Name:       deployment.Name,
		UID:        deployment.UID,
		Controller: &trueVar,
	}
	newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef)
	allRSs := append(oldRSs, &newRS)
	newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS)
	if err != nil {

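On the new ReplicaSet, this shows up as an ownerReferences entry in its metadata; for a hypothetical Deployment named nginx, the created object would carry something like this (UID illustrative):

```yaml
metadata:
  ownerReferences:
  - apiVersion: extensions/v1beta1
    kind: Deployment
    name: nginx
    uid: 3c1a8b22-example-deployment-uid
    controller: true
```
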
@@ -468,7 +468,9 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati

	// Delete the deployment at the end.
	// Note: We delete the deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
-	return deployments.Delete(name, nil)
+	var falseVar = false
+	nonOrphanOption := api.DeleteOptions{OrphanDependents: &falseVar}
+	return deployments.Delete(name, &nonOrphanOption)
}

type updateDeploymentFunc func(d *extensions.Deployment)

@@ -22,6 +22,7 @@ import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/rest"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/extensions/validation"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
@@ -44,6 +45,12 @@ type deploymentStrategy struct {
// objects via the REST API.
var Strategy = deploymentStrategy{api.Scheme, api.SimpleNameGenerator}

// DefaultGarbageCollectionPolicy returns Orphan because that is the default
// behavior before server-side garbage collection is implemented.
func (deploymentStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy {
	return rest.OrphanDependents
}

// NamespaceScoped is true for deployment.
func (deploymentStrategy) NamespaceScoped() bool {
	return true

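Since the server-side default remains Orphan, a caller that wants dependents cleaned up must say so explicitly in DeleteOptions, which is exactly what the reaper change above does. A minimal client-side sketch (the helper name is illustrative, not part of this commit):

```go
// deleteDeploymentCascading opts in to cascading deletion: with
// OrphanDependents=false the garbage collector deletes the Deployment's
// ReplicaSets instead of orphaning them.
func deleteDeploymentCascading(client clientset.Interface, ns, name string) error {
	falseVar := false
	return client.Extensions().Deployments(ns).Delete(name, &v1.DeleteOptions{OrphanDependents: &falseVar})
}
```
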
@@ -184,6 +184,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl
	reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute

	err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

@@ -21,8 +21,10 @@ import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/metrics"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
@@ -40,6 +42,40 @@ func getNonOrphanOptions() *v1.DeleteOptions {
	return &v1.DeleteOptions{OrphanDependents: &falseVar}
}

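The orphaning test later in this diff also calls getOrphanOptions, which is not shown in any hunk here; presumably it mirrors getNonOrphanOptions with the flag inverted:

```go
// Presumed counterpart of getNonOrphanOptions (not shown in this diff).
func getOrphanOptions() *v1.DeleteOptions {
	trueVar := true
	return &v1.DeleteOptions{OrphanDependents: &trueVar}
}
```
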
var zero = int64(0)
var deploymentLabels = map[string]string{"app": "gc-test"}
var podTemplateSpec = v1.PodTemplateSpec{
	ObjectMeta: v1.ObjectMeta{
		Labels: deploymentLabels,
	},
	Spec: v1.PodSpec{
		TerminationGracePeriodSeconds: &zero,
		Containers: []v1.Container{
			{
				Name:  "nginx",
				Image: "gcr.io/google_containers/nginx:1.7.9",
			},
		},
	},
}

func newOwnerDeployment(f *framework.Framework, deploymentName string) *v1beta1.Deployment {
	replicas := int32(2)
	return &v1beta1.Deployment{
		ObjectMeta: v1.ObjectMeta{
			Name: deploymentName,
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: deploymentLabels},
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: podTemplateSpec,
		},
	}
}

func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
	var replicas int32
	replicas = 2
@@ -55,23 +91,40 @@ func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicas,
			Selector: map[string]string{"app": "gc-test"},
-			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
-					Labels: map[string]string{"app": "gc-test"},
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  "nginx",
-							Image: "gcr.io/google_containers/nginx:1.7.9",
-						},
-					},
-				},
-			},
+			Template: &podTemplateSpec,
		},
	}
}

// verifyRemainingDeploymentsAndReplicaSets verifies whether the numbers of the remaining
// deployments and ReplicaSets equal deploymentNum and rsNum. It returns an error if
// communication with the API server fails.
func verifyRemainingDeploymentsAndReplicaSets(
	f *framework.Framework,
	clientSet clientset.Interface,
	deployment *v1beta1.Deployment,
	deploymentNum, rsNum int,
) (bool, error) {
	var ret = true
	rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list rs: %v", err)
	}
	if len(rs.Items) != rsNum {
		ret = false
		By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
	}
	deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list deployments: %v", err)
	}
	if len(deployments.Items) != deploymentNum {
		ret = false
		By(fmt.Sprintf("expected %d deployments, got %d deployments", deploymentNum, len(deployments.Items)))
	}
	return ret, nil
}

// verifyRemainingObjects verifies whether the numbers of the remaining replication
// controllers and pods equal rcNum and podNum. It returns an error if
// communication with the API server fails.
@@ -116,7 +169,7 @@ func gatherMetrics(f *framework.Framework) {

var _ = framework.KubeDescribe("Garbage collector", func() {
	f := framework.NewDefaultFramework("gc")
-	It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
+	It("should delete pods created by rc when not orphaning", func() {
		clientSet := f.ClientSet
		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
		podClient := clientSet.Core().Pods(f.Namespace.Name)
@@ -167,7 +220,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
		gatherMetrics(f)
	})

-	It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
+	It("should orphan pods created by rc if delete options say so", func() {
		clientSet := f.ClientSet
		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
		podClient := clientSet.Core().Pods(f.Namespace.Name)
@@ -229,7 +282,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
		gatherMetrics(f)
	})

-	It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
+	It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
		clientSet := f.ClientSet
		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
		podClient := clientSet.Core().Pods(f.Namespace.Name)
@@ -275,4 +328,117 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
		}
		gatherMetrics(f)
	})

It("should delete RS created by deployment when not orphaning", func() {
|
||||
clientSet := f.ClientSet
|
||||
deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
|
||||
rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
|
||||
deploymentName := "simpletest.deployment"
|
||||
deployment := newOwnerDeployment(f, deploymentName)
|
||||
By("create the deployment")
|
||||
createdDeployment, err := deployClient.Create(deployment)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create deployment: %v", err)
|
||||
}
|
||||
// wait for deployment to create some rs
|
||||
By("Wait for the Deployment to create new ReplicaSet")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
rsList, err := rsClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rs: %v", err)
|
||||
}
|
||||
return len(rsList.Items) > 0, nil
|
||||
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
|
||||
}
|
||||
|
||||
By("delete the deployment")
|
||||
deleteOptions := getNonOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
By("wait for all rs to be garbage collected")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
return verifyRemainingDeploymentsAndReplicaSets(f, clientSet, deployment, 0, 0)
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Failed to wait for all rs to be garbage collected: %v", err)
|
||||
remainingRSs, err := rsClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
} else {
|
||||
framework.Failf("remaining rs are: %#v", remainingRSs)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
gatherMetrics(f)
|
||||
})
|
||||
|
||||
It("should orphan RS created by deployment when deleteOptions.OrphanDependents is true", func() {
|
||||
clientSet := f.ClientSet
|
||||
deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
|
||||
rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
|
||||
deploymentName := "simpletest.deployment"
|
||||
deployment := newOwnerDeployment(f, deploymentName)
|
||||
By("create the deployment")
|
||||
createdDeployment, err := deployClient.Create(deployment)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create deployment: %v", err)
|
||||
}
|
||||
// wait for deployment to create some rs
|
||||
By("Wait for the Deployment to create new ReplicaSet")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
rsList, err := rsClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rs: %v", err)
|
||||
}
|
||||
return len(rsList.Items) > 0, nil
|
||||
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
|
||||
}
|
||||
|
||||
By("delete the deployment")
|
||||
deleteOptions := getOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
By("wait for 2 Minute to see if the garbage collector mistakenly deletes the rs")
|
||||
err = wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
|
||||
return verifyRemainingDeploymentsAndReplicaSets(f, clientSet, deployment, 0, 1)
|
||||
})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to wait to see if the garbage collecter mistakenly deletes the rs: %v", err)
|
||||
remainingRSs, err := rsClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
} else {
|
||||
framework.Failf("remaining rs post mortem: %#v", remainingRSs)
|
||||
}
|
||||
remainingDSs, err := deployClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list Deployments post mortem: %v", err)
|
||||
} else {
|
||||
framework.Failf("remaining deployment's post mortem: %#v", remainingDSs)
|
||||
}
|
||||
}
|
||||
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list ReplicaSet %v", err)
|
||||
}
|
||||
for _, replicaSet := range rs.Items {
|
||||
if controller.GetControllerOf(replicaSet.ObjectMeta) != nil {
|
||||
framework.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
|
||||
}
|
||||
}
|
||||
|
||||
gatherMetrics(f)
|
||||
})
|
||||
|
||||
})
|
||||
|