Merge pull request #46669 from kow3ns/statefulset-update

Automatic merge from submit-queue (batch tested with PRs 46235, 44786, 46833, 46756, 46669)

implements StatefulSet update

**What this PR does / why we need it**:
1. Implements rolling update for StatefulSets
2. Implements controller history for StatefulSets.
3. Makes StatefulSet status reporting consistent with DaemonSet and ReplicaSet.

https://github.com/kubernetes/features/issues/188

**Special notes for your reviewer**:

**Release note**:
```release-note
Implements rolling update for StatefulSets. Updates can be performed using the RollingUpdate, Partitioned, or OnDelete strategies. OnDelete implements the manual behavior from 1.6. Status now tracks 
replicas, readyReplicas, currentReplicas, and updatedReplicas. The semantics of replicas is now consistent with DaemonSet and ReplicaSet, and readyReplicas has the semantics that replicas did prior to this release.
```
This commit is contained in:
Kubernetes Submit Queue
2017-06-07 00:27:53 -07:00
committed by GitHub
56 changed files with 9132 additions and 864 deletions

View File

@@ -312,6 +312,22 @@ func (s *StatefulSetTester) waitForRunning(numStatefulPods int32, ss *apps.State
}
}
// WaitForState periodically polls the StatefulSet and its pods until the until
// function returns either true or an error. The predicate receives a freshly
// fetched StatefulSet and its current pod list on every poll tick. Fails the
// test (via Failf) if the condition is not met within StatefulSetTimeout.
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			// Re-fetch on each tick so the predicate always sees current state.
			ssGet, err := s.c.Apps().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			podList := s.GetPodList(ssGet)
			return until(ssGet, podList)
		})
	if pollErr != nil {
		// This helper waits for an arbitrary caller-supplied condition, so the
		// failure message must not claim it was waiting for "pods to enter running".
		Failf("Failed waiting for state update: %v", pollErr)
	}
}
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) {
s.waitForRunning(numStatefulPods, ss, true)
@@ -365,8 +381,33 @@ func (s *StatefulSetTester) SetHealthy(ss *apps.StatefulSet) {
}
}
// WaitForStatus waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatus(ss *apps.StatefulSet, expectedReplicas int32) {
// WaitForStatusReadyReplicas waits for ss.Status.ReadyReplicas to equal
// expectedReplicas, ignoring status that predates the set's current generation.
// Fails the test (via Failf) if the condition is not met within StatefulSetTimeout.
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
	// Log the field this function actually waits on (readyReplicas, not replicas).
	Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)

	ns, name := ss.Namespace, ss.Name
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			ssGet, err := s.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			// Skip stale status the controller has not yet observed for this generation.
			if *ssGet.Status.ObservedGeneration < ss.Generation {
				return false, nil
			}
			if ssGet.Status.ReadyReplicas != expectedReplicas {
				// Report ReadyReplicas — the field being compared — rather than
				// Status.Replicas, which would make the "currently" value misleading.
				Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
				return false, nil
			}
			return true, nil
		})
	if pollErr != nil {
		Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
	}
}
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
@@ -416,7 +457,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
if err := sst.Scale(&ss, 0); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
sst.WaitForStatus(&ss, 0)
sst.WaitForStatusReplicas(&ss, 0)
Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
@@ -561,6 +602,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Volumes: vols,
},
},
UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
VolumeClaimTemplates: claims,
ServiceName: governingSvcName,
},

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
@@ -250,8 +249,11 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
It("should allow template updates", func() {
By("Creating stateful set " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 2
testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}}
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 2, nil, nil, labels)
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe
ss, err := c.Apps().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
@@ -268,79 +270,21 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
})
Expect(err).NotTo(HaveOccurred())
updateIndex := 0
By(fmt.Sprintf("Deleting stateful pod at index %d", updateIndex))
sst.DeleteStatefulPodAtIndex(updateIndex, ss)
By("Waiting for all stateful pods to be running again")
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
By(fmt.Sprintf("Verifying stateful pod at index %d is updated", updateIndex))
verify := func(pod *v1.Pod) {
podImage := pod.Spec.Containers[0].Image
Expect(podImage).To(Equal(newImage), fmt.Sprintf("Expected stateful pod image %s updated to %s", podImage, newImage))
}
sst.VerifyPodAtIndex(updateIndex, ss, verify)
})
It("Scaling down before scale up is finished should wait until current pod will be running and ready before it will be removed", func() {
By("Creating stateful set " + ssName + " in namespace " + ns + ", and pausing scale operations after each pod")
testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}}
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe
framework.SetStatefulSetInitializedAnnotation(ss, "false")
ss, err := c.Apps().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst := framework.NewStatefulSetTester(c)
sst.WaitForRunningAndReady(1, ss)
By("Scaling up stateful set " + ssName + " to 3 replicas and pausing after 2nd pod")
sst.SetHealthy(ss)
sst.UpdateReplicas(ss, 3)
sst.WaitForRunningAndReady(2, ss)
By("Before scale up finished setting 2nd pod to be not ready by breaking readiness probe")
sst.BreakProbe(ss, testProbe)
sst.WaitForStatus(ss, 0)
sst.WaitForRunningAndNotReady(2, ss)
By("Continue scale operation after the 2nd pod, and scaling down to 1 replica")
sst.SetHealthy(ss)
sst.UpdateReplicas(ss, 1)
By("Verifying that the 2nd pod wont be removed if it is not running and ready")
sst.ConfirmStatefulPodCount(2, ss, 10*time.Second, true)
expectedPodName := ss.Name + "-1"
expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verifying the 2nd pod is removed only when it becomes running and ready")
sst.RestoreProbe(ss, testProbe)
watcher, err := f.ClientSet.Core().Pods(ns).Watch(metav1.SingleObject(
metav1.ObjectMeta{
Name: expectedPod.Name,
ResourceVersion: expectedPod.ResourceVersion,
},
))
Expect(err).NotTo(HaveOccurred())
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
if event.Type == watch.Deleted && pod.Name == expectedPodName {
return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
}
framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
event.Type, pod.Name, pod.Status.Phase, podutil.IsPodReady(pod))
if pod.Name != expectedPodName {
sst.WaitForState(ss, func(set *apps.StatefulSet, pods *v1.PodList) (bool, error) {
if len(pods.Items) < 2 {
return false, nil
}
if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod) {
return true, nil
for i := range pods.Items {
if pods.Items[i].Spec.Containers[0].Image != newImage {
framework.Logf("Waiting for pod %s to have image %s current image %s",
pods.Items[i].Name,
newImage,
pods.Items[i].Spec.Containers[0].Image)
return false, nil
}
}
return false, nil
return true, nil
})
Expect(err).NotTo(HaveOccurred())
})
It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
@@ -367,7 +311,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
sst.BreakProbe(ss, testProbe)
sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
sst.WaitForStatus(ss, 0)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.UpdateReplicas(ss, 3)
sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true)
@@ -397,7 +341,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
Expect(err).NotTo(HaveOccurred())
sst.BreakProbe(ss, testProbe)
sst.WaitForStatus(ss, 0)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.WaitForRunningAndNotReady(3, ss)
sst.UpdateReplicas(ss, 0)
sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true)
@@ -442,7 +386,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
sst.BreakProbe(ss, testProbe)
sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
sst.WaitForStatus(ss, 0)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.UpdateReplicas(ss, 3)
sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)
@@ -452,7 +396,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
By("Scale down will not halt with unhealthy stateful pod")
sst.BreakProbe(ss, testProbe)
sst.WaitForStatus(ss, 0)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.WaitForRunningAndNotReady(3, ss)
sst.UpdateReplicas(ss, 0)
sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false)
@@ -460,7 +404,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
sst.RestoreProbe(ss, testProbe)
sst.Scale(ss, 0)
sst.WaitForStatus(ss, 0)
sst.WaitForStatusReadyReplicas(ss, 0)
})
It("Should recreate evicted statefulset", func() {