Merge pull request #19840 from madhusudancs/replicaset-deployment

Auto commit by PR queue bot
k8s-merge-robot committed on 2016-02-09 18:57:42 -08:00
39 changed files with 8237 additions and 8013 deletions


@@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
@@ -477,3 +478,16 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool {
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSets by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
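
The added ReplicaSetsByCreationTimestamp type implements Go's sort.Interface so replica sets can be ordered oldest-first, with names breaking ties between equal timestamps. The following is a minimal, self-contained sketch of that pattern; it uses a simplified stand-in type rather than the real extensions.ReplicaSet and unversioned.Time, so the field and type names here are illustrative only.

// Sketch of the sort.Interface pattern used by ReplicaSetsByCreationTimestamp,
// with a simplified stand-in type instead of extensions.ReplicaSet.
package main

import (
	"fmt"
	"sort"
	"time"
)

type fakeReplicaSet struct {
	Name              string
	CreationTimestamp time.Time
}

// byCreationTimestamp mirrors ReplicaSetsByCreationTimestamp: older first,
// names break ties when timestamps are equal.
type byCreationTimestamp []*fakeReplicaSet

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

func main() {
	now := time.Now()
	rss := []*fakeReplicaSet{
		{Name: "foo-2", CreationTimestamp: now},
		{Name: "foo-1", CreationTimestamp: now},
		{Name: "foo-0", CreationTimestamp: now.Add(-time.Hour)},
	}
	sort.Sort(byCreationTimestamp(rss))
	for _, rs := range rss {
		fmt.Println(rs.Name)
	}
	// Prints foo-0, foo-1, foo-2: the older set sorts first, and the
	// two sets with equal timestamps fall back to name order.
}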

File diff suppressed because it is too large


@@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
)
func TestDeploymentController_reconcileNewRC(t *testing.T) {
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxSurge intstr.IntOrString
@@ -87,16 +87,16 @@ func TestDeploymentController_reconcileNewRC(t *testing.T) {
for i, test := range tests {
t.Logf("executing scenario %d", i)
newRc := rc("foo-v2", test.newReplicas, nil)
oldRc := rc("foo-v2", test.oldReplicas, nil)
allRcs := []*api.ReplicationController{newRc, oldRc}
newRS := rs("foo-v2", test.newReplicas, nil)
oldRS := rs("foo-v2", test.oldReplicas, nil)
allRSs := []*exp.ReplicaSet{newRS, oldRS}
deployment := deployment("foo", test.deploymentReplicas, test.maxSurge, intstr.FromInt(0))
fake := fake.Clientset{}
controller := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileNewRC(allRcs, newRc, deployment)
scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -115,21 +115,21 @@ func TestDeploymentController_reconcileNewRC(t *testing.T) {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(testclient.UpdateAction).GetObject().(*api.ReplicationController)
updated := fake.Actions()[0].(testclient.UpdateAction).GetObject().(*exp.ReplicaSet)
if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}
func TestDeploymentController_reconcileOldRCs(t *testing.T) {
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
oldReplicas int
newReplicas int
readyPodsFromOldRC int
readyPodsFromNewRC int
readyPodsFromOldRS int
readyPodsFromNewRS int
scaleExpected bool
expectedOldReplicas int
}{
@@ -138,8 +138,8 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
maxUnavailable: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 10,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
{
@@ -147,38 +147,38 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 10,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect unhealthy replicas from old rcs been cleaned up
{ // expect unhealthy replicas from old replica sets to be cleaned up
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 8,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect 1 unhealthy replica from old rcs been cleaned up, and 1 ready pod been scaled down
{ // expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRC: 9,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 9,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // the unavailable pods from the newRC would not make us scale down old RCs in a further step
{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRC: 8,
readyPodsFromNewRC: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
}
@@ -187,10 +187,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
newSelector := map[string]string{"foo": "new"}
oldSelector := map[string]string{"foo": "old"}
newRc := rc("foo-new", test.newReplicas, newSelector)
oldRc := rc("foo-old", test.oldReplicas, oldSelector)
oldRCs := []*api.ReplicationController{oldRc}
allRCs := []*api.ReplicationController{oldRc, newRc}
newRS := rs("foo-new", test.newReplicas, newSelector)
oldRS := rs("foo-old", test.oldReplicas, oldSelector)
oldRSs := []*exp.ReplicaSet{oldRS}
allRSs := []*exp.ReplicaSet{oldRS, newRS}
deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
fakeClientset := fake.Clientset{}
@@ -198,10 +198,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
switch action.(type) {
case core.ListAction:
podList := &api.PodList{}
for podIndex := 0; podIndex < test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-oldReadyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-oldReadyPod-%d", oldRS.Name, podIndex),
Labels: oldSelector,
},
Status: api.PodStatus{
@@ -214,10 +214,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRS.Name, podIndex),
Labels: oldSelector,
},
Status: api.PodStatus{
@@ -230,10 +230,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.readyPodsFromNewRC; podIndex++ {
for podIndex := 0; podIndex < test.readyPodsFromNewRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-newReadyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-newReadyPod-%d", oldRS.Name, podIndex),
Labels: newSelector,
},
Status: api.PodStatus{
@@ -246,10 +246,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
},
})
}
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRC; podIndex++ {
for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-newUnhealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-newUnhealthyPod-%d", oldRS.Name, podIndex),
Labels: newSelector,
},
Status: api.PodStatus{
@@ -271,7 +271,7 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileOldRCs(allRCs, oldRCs, newRc, deployment, false)
scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -327,8 +327,8 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRc := rc("foo-v2", test.oldReplicas, nil)
oldRCs := []*api.ReplicationController{oldRc}
oldRS := rs("foo-v2", test.oldReplicas, nil)
oldRSs := []*exp.ReplicaSet{oldRS}
deployment := deployment("foo", 10, intstr.FromInt(2), intstr.FromInt(2))
fakeClientset := fake.Clientset{}
fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -338,7 +338,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
for podIndex := 0; podIndex < test.readyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-readyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-readyPod-%d", oldRS.Name, podIndex),
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -353,7 +353,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
for podIndex := 0; podIndex < test.unHealthyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRS.Name, podIndex),
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -374,7 +374,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRCs, deployment, test.maxCleanupCount)
cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, test.maxCleanupCount)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -386,7 +386,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
}
}
func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
@@ -428,9 +428,9 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRc := rc("foo-v2", test.oldReplicas, nil)
allRcs := []*api.ReplicationController{oldRc}
oldRcs := []*api.ReplicationController{oldRc}
oldRS := rs("foo-v2", test.oldReplicas, nil)
allRSs := []*exp.ReplicaSet{oldRS}
oldRSs := []*exp.ReplicaSet{oldRS}
deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
fakeClientset := fake.Clientset{}
fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -440,7 +440,8 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
for podIndex := 0; podIndex < test.readyPods; podIndex++ {
podList.Items = append(podList.Items, api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("%s-pod-%d", oldRc.Name, podIndex),
Name: fmt.Sprintf("%s-pod-%d", oldRS.Name, podIndex),
Labels: map[string]string{"foo": "bar"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
@@ -460,7 +461,7 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.scaleDownOldRCsForRollingUpdate(allRcs, oldRcs, deployment)
scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
@@ -492,42 +493,42 @@ func TestDeploymentController_scaleDownOldRCsForRollingUpdate(t *testing.T) {
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*api.ReplicationController)
updated := updateAction.GetObject().(*exp.ReplicaSet)
if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}
func TestDeploymentController_cleanupOldRCs(t *testing.T) {
func TestDeploymentController_cleanupOldReplicaSets(t *testing.T) {
selector := map[string]string{"foo": "bar"}
tests := []struct {
oldRCs []*api.ReplicationController
oldRSs []*exp.ReplicaSet
revisionHistoryLimit int
expectedDeletions int
}{
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 0, selector),
rc("foo-2", 0, selector),
rc("foo-3", 0, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 0, selector),
rs("foo-2", 0, selector),
rs("foo-3", 0, selector),
},
revisionHistoryLimit: 1,
expectedDeletions: 2,
},
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 0, selector),
rc("foo-2", 0, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 0, selector),
rs("foo-2", 0, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 2,
},
{
oldRCs: []*api.ReplicationController{
rc("foo-1", 1, selector),
rc("foo-2", 1, selector),
oldRSs: []*exp.ReplicaSet{
rs("foo-1", 1, selector),
rs("foo-2", 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
@@ -539,14 +540,14 @@ func TestDeploymentController_cleanupOldRCs(t *testing.T) {
controller := NewDeploymentController(fake, controller.NoResyncPeriodFunc)
controller.eventRecorder = &record.FakeRecorder{}
controller.rcStoreSynced = alwaysReady
controller.rsStoreSynced = alwaysReady
controller.podStoreSynced = alwaysReady
for _, rc := range test.oldRCs {
controller.rcStore.Add(rc)
for _, rs := range test.oldRSs {
controller.rsStore.Add(rs)
}
d := newDeployment(1, &tests[i].revisionHistoryLimit)
controller.cleanupOldRcs(test.oldRCs, *d)
controller.cleanupOldReplicaSets(test.oldRSs, *d)
gotDeletions := 0
for _, action := range fake.Actions() {
@@ -555,20 +556,20 @@ func TestDeploymentController_cleanupOldRCs(t *testing.T) {
}
}
if gotDeletions != test.expectedDeletions {
t.Errorf("expect %v old rcs been deleted, but got %v", test.expectedDeletions, gotDeletions)
t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
continue
}
}
}
func rc(name string, replicas int, selector map[string]string) *api.ReplicationController {
return &api.ReplicationController{
func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
return &exp.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: api.ReplicationControllerSpec{
Spec: exp.ReplicaSetSpec{
Replicas: replicas,
Selector: selector,
Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: &api.PodTemplateSpec{},
},
}
@@ -609,7 +610,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
RollingUpdate: &exp.RollingUpdateDeployment{},
},
Replicas: replicas,
Selector: map[string]string{"foo": "bar"},
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{
@@ -640,13 +641,13 @@ func getKey(d *exp.Deployment, t *testing.T) string {
}
}
func newReplicationController(d *exp.Deployment, name string, replicas int) *api.ReplicationController {
return &api.ReplicationController{
func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet {
return &exp.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
},
Spec: api.ReplicationControllerSpec{
Spec: exp.ReplicaSetSpec{
Replicas: replicas,
Template: &d.Spec.Template,
},
@@ -664,7 +665,7 @@ type fixture struct {
client *fake.Clientset
// Objects to put in the store.
dStore []*exp.Deployment
rcStore []*api.ReplicationController
rsStore []*exp.ReplicaSet
podStore []*api.Pod
// Actions expected to happen on the client. Objects from here are also
@@ -678,14 +679,14 @@ func (f *fixture) expectUpdateDeploymentAction(d *exp.Deployment) {
f.objects.Items = append(f.objects.Items, d)
}
func (f *fixture) expectCreateRCAction(rc *api.ReplicationController) {
f.actions = append(f.actions, core.NewCreateAction("replicationcontrollers", rc.Namespace, rc))
f.objects.Items = append(f.objects.Items, rc)
func (f *fixture) expectCreateRSAction(rs *exp.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction("replicasets", rs.Namespace, rs))
f.objects.Items = append(f.objects.Items, rs)
}
func (f *fixture) expectUpdateRCAction(rc *api.ReplicationController) {
f.actions = append(f.actions, core.NewUpdateAction("replicationcontrollers", rc.Namespace, rc))
f.objects.Items = append(f.objects.Items, rc)
func (f *fixture) expectUpdateRSAction(rs *exp.ReplicaSet) {
f.actions = append(f.actions, core.NewUpdateAction("replicasets", rs.Namespace, rs))
f.objects.Items = append(f.objects.Items, rs)
}
func (f *fixture) expectListPodAction(namespace string, opt api.ListOptions) {
@@ -703,13 +704,13 @@ func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects)
c := NewDeploymentController(f.client, controller.NoResyncPeriodFunc)
c.eventRecorder = &record.FakeRecorder{}
c.rcStoreSynced = alwaysReady
c.rsStoreSynced = alwaysReady
c.podStoreSynced = alwaysReady
for _, d := range f.dStore {
c.dStore.Store.Add(d)
}
for _, rc := range f.rcStore {
c.rcStore.Store.Add(rc)
for _, rs := range f.rsStore {
c.rsStore.Store.Add(rs)
}
for _, pod := range f.podStore {
c.podStore.Store.Add(pod)
@@ -739,22 +740,22 @@ func (f *fixture) run(deploymentName string) {
}
}
func TestSyncDeploymentCreatesRC(t *testing.T) {
func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
f := newFixture(t)
d := newDeployment(1, nil)
f.dStore = append(f.dStore, d)
// expect that one rc with zero replicas is created
// expect that one ReplicaSet with zero replicas is created
// then is updated to 1 replica
rc := newReplicationController(d, "deploymentrc-4186632231", 0)
updatedRC := newReplicationController(d, "deploymentrc-4186632231", 1)
rs := newReplicaSet(d, "deploymentrs-4186632231", 0)
updatedRS := newReplicaSet(d, "deploymentrs-4186632231", 1)
opt := newListOptions()
f.expectCreateRCAction(rc)
f.expectCreateRSAction(rs)
f.expectUpdateDeploymentAction(d)
f.expectUpdateRCAction(updatedRC)
f.expectListPodAction(rc.Namespace, opt)
f.expectUpdateRSAction(updatedRS)
f.expectListPodAction(rs.Namespace, opt)
f.expectUpdateDeploymentAction(d)
f.run(getKey(d, t))
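
Most of the test changes above follow one fixture pattern: install a reactor on a fake clientset that fabricates a pod list, run the controller method under test, then assert on the actions the fake client recorded (for example, exactly one update of a ReplicaSet with the expected replica count). The sketch below imitates that flow with simplified stand-in types; it does not use the real fake.Clientset or testclient packages, whose helpers are only mimicked here.

// Simplified, self-contained sketch of the reactor/recorded-actions pattern
// used by these tests; all types here are stand-ins, not the k8s fake client.
package main

import "fmt"

// action records one call made against the fake client.
type action struct {
	Verb     string // e.g. "list", "update", "delete"
	Resource string // e.g. "pods", "replicasets"
}

// fakeClient records every action and lets a test install reactors that
// fabricate responses, loosely like fake.Clientset.AddReactor.
type fakeClient struct {
	actions  []action
	reactors map[string]func() interface{}
}

func newFakeClient() *fakeClient {
	return &fakeClient{reactors: map[string]func() interface{}{}}
}

func (c *fakeClient) AddReactor(verb, resource string, fn func() interface{}) {
	c.reactors[verb+"/"+resource] = fn
}

// invoke records the action and returns whatever the matching reactor fabricates.
func (c *fakeClient) invoke(verb, resource string) interface{} {
	c.actions = append(c.actions, action{verb, resource})
	if fn, ok := c.reactors[verb+"/"+resource]; ok {
		return fn()
	}
	return nil
}

func main() {
	client := newFakeClient()

	// Reactor fabricates a pod list, the way the tests above synthesize
	// ready and unhealthy pods for reconcileOldReplicaSets.
	client.AddReactor("list", "pods", func() interface{} {
		return []string{"foo-oldReadyPod-0", "foo-oldReadyPod-1"}
	})

	// The code under test would call through the client; two calls are faked here.
	pods := client.invoke("list", "pods")
	client.invoke("update", "replicasets")

	fmt.Println("pods returned:", pods)
	fmt.Println("recorded actions:", client.actions)
	// A real test would now compare client.actions against the expected
	// actions registered by helpers like expectUpdateRSAction.
}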