Merge pull request #35164 from janetkuo/petset-status-fix

Automatic merge from submit-queue

Verify petset status.replicas in e2e test


**What this PR does / why we need it**: Follow-up to #33983. The PetSet `status.replicas` bug is fixed, so this adds e2e checks for it (especially for the `should handle healthy pet restarts during scale` case).
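
For context, the check added here follows the standard e2e polling pattern: repeatedly fetch the PetSet, compare `status.replicas` against the expected count, and retry until it matches or a timeout expires. Below is a minimal, self-contained sketch of that pattern; the `getStatusReplicas` fetcher, the timings, and the messages are placeholders for illustration, not the real test helpers.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// getStatusReplicas stands in for fetching the PetSet from the API server
// and reading its status.replicas field (hypothetical placeholder).
func getStatusReplicas() (int32, error) {
	return 3, nil
}

// waitForReplicaStatus polls until the observed replica count equals
// expectedReplicas or the timeout expires, mirroring the shape of the
// wait.PollImmediate loop used in the e2e helper.
func waitForReplicaStatus(expectedReplicas int32, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		got, err := getStatusReplicas()
		if err != nil {
			return err
		}
		if got == expectedReplicas {
			return nil
		}
		fmt.Printf("Waiting for status.replicas to become %d, currently %d\n", expectedReplicas, got)
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for status.replicas")
		}
		time.Sleep(interval)
	}
}

func main() {
	if err := waitForReplicaStatus(3, time.Second, 30*time.Second); err != nil {
		fmt.Println(err)
	}
}
```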

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:  cc @erictune @foxish @kubernetes/sig-apps

**Release note**:
```release-note
NONE
```
Authored by Kubernetes Submit Queue on 2016-10-20 05:29:41 -07:00; committed by GitHub.

@@ -160,8 +160,6 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
By("Waiting for pet at index 1 to enter running.")
pst.waitForRunning(2, ps)
// TODO: verify petset status.replicas
// Now we have 1 healthy and 1 unhealthy pet. Deleting the healthy pet should *not*
// create a new pet till the remaining pet becomes healthy, which won't happen till
// we set the healthy bit.
@@ -583,8 +581,6 @@ func (p *petSetTester) saturate(ps *apps.PetSet) {
framework.Logf("Marking pet at index " + fmt.Sprintf("%v", i) + " healthy")
p.setHealthy(ps)
}
framework.Logf("Waiting for pet set status.replicas updated to %d", ps.Spec.Replicas)
p.waitForStatus(ps, ps.Spec.Replicas)
}
func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) {
@@ -692,6 +688,8 @@ func (p *petSetTester) waitForRunning(numPets int32, ps *apps.PetSet) {
if pollErr != nil {
framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
}
p.waitForStatus(ps, numPets)
}
func (p *petSetTester) setHealthy(ps *apps.PetSet) {
@@ -717,6 +715,8 @@ func (p *petSetTester) setHealthy(ps *apps.PetSet) {
}
func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) {
framework.Logf("Waiting for petset status.replicas updated to %d", expectedReplicas)
ns, name := ps.Namespace, ps.Name
pollErr := wait.PollImmediate(petsetPoll, petsetTimeout,
func() (bool, error) {
@@ -725,13 +725,13 @@ func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) {
return false, err
}
if psGet.Status.Replicas != expectedReplicas {
framework.Logf("Waiting for pet set status to become %d, currently %d", expectedReplicas, ps.Status.Replicas)
framework.Logf("Waiting for pet set status to become %d, currently %d", expectedReplicas, psGet.Status.Replicas)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.Failf("Failed waiting for pet set status.replicas updated to %d, got %d: %v", expectedReplicas, ps.Status.Replicas, pollErr)
framework.Failf("Failed waiting for pet set status.replicas updated to %d: %v", expectedReplicas, pollErr)
}
}
@@ -748,6 +748,7 @@ func deleteAllPetSets(c *client.Client, ns string) {
if err := pst.scale(&ps, 0); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
pst.waitForStatus(&ps, 0)
framework.Logf("Deleting petset %v", ps.Name)
if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))