Node upgrade tests.
@@ -24,6 +24,7 @@ import (
     "time"

     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+    "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
@@ -34,9 +35,9 @@ import (
     . "github.com/onsi/gomega"
 )

-var serveHostnameImage string = "gcr.io/google_containers/serve_hostname:1.1"
+const serveHostnameImage = "gcr.io/google_containers/serve_hostname:1.1"

-func resizeNodeInstanceGroup(size int) error {
+func resizeGroup(size int) error {
     // TODO: make this hit the compute API directly instread of shelling out to gcloud.
     output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone,
         "resize", testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--new-size=%v", size)).CombinedOutput()
@@ -46,7 +47,7 @@ func resizeNodeInstanceGroup(size int) error {
     return err
 }

-func nodeInstanceGroupSize() (int, error) {
+func groupSize() (int, error) {
     // TODO: make this hit the compute API directly instread of shelling out to gcloud.
     output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID,
         "--zone="+testContext.CloudConfig.Zone, "describe", testContext.CloudConfig.NodeInstanceGroup).CombinedOutput()
@@ -71,9 +72,9 @@ func nodeInstanceGroupSize() (int, error) {
     return currentSize, nil
 }

-func waitForNodeInstanceGroupSize(size int) error {
+func waitForGroupSize(size int) error {
     for start := time.Now(); time.Since(start) < 4*time.Minute; time.Sleep(5 * time.Second) {
-        currentSize, err := nodeInstanceGroupSize()
+        currentSize, err := groupSize()
         if err != nil {
             Logf("Failed to get node instance group size: %v", err)
             continue
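
As an aside on the shared polling pattern: waitForGroupSize (and waitForClusterSize below) loop on a 5-second interval against a fixed deadline. The following is a minimal, self-contained sketch of that idiom, assuming a caller-supplied size probe; only the loop shape, interval, and timeout mirror the commit, everything else is illustrative:

    package main

    import (
        "fmt"
        "time"
    )

    // pollUntilSize retries a size probe every 5 seconds for up to 4 minutes,
    // succeeding once the probe reports the expected value. Illustrative only.
    func pollUntilSize(want int, probe func() (int, error)) error {
        for start := time.Now(); time.Since(start) < 4*time.Minute; time.Sleep(5 * time.Second) {
            got, err := probe()
            if err != nil {
                continue // transient errors are retried until the deadline
            }
            if got == want {
                return nil
            }
        }
        return fmt.Errorf("timeout waiting for size to reach %d", want)
    }

    func main() {
        size := 3
        err := pollUntilSize(2, func() (int, error) {
            size-- // pretend the group shrinks over time
            return size, nil
        })
        fmt.Println("done:", err)
    }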
@@ -104,7 +105,7 @@ func waitForClusterSize(c *client.Client, size int) error {
     return fmt.Errorf("timeout waiting for cluster size to be %d", size)
 }

-func newServiceWithNameSelector(name string) *api.Service {
+func svcByName(name string) *api.Service {
     return &api.Service{
         ObjectMeta: api.ObjectMeta{
             Name: "test-service",
@@ -121,12 +122,12 @@ func newServiceWithNameSelector(name string) *api.Service {
     }
 }

-func createServiceWithNameSelector(c *client.Client, ns, name string) error {
-    _, err := c.Services(ns).Create(newServiceWithNameSelector(name))
+func newSVCByName(c *client.Client, ns, name string) error {
+    _, err := c.Services(ns).Create(svcByName(name))
     return err
 }

-func newPodOnNode(podName, nodeName string, image string) *api.Pod {
+func podOnNode(podName, nodeName string, image string) *api.Pod {
     return &api.Pod{
         ObjectMeta: api.ObjectMeta{
             Name: podName,
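
Purely for orientation, a hypothetical call site for the renamed service helper; it relies on newSVCByName and Failf from this file, and the namespace and service name are placeholders rather than values from the commit:

    // createDemoService is a sketch, not part of the commit: it shows how the
    // shorter helper name reads at a call site.
    func createDemoService(c *client.Client, ns string) {
        if err := newSVCByName(c, ns, "my-hostname-demo"); err != nil {
            Failf("Couldn't create the name-selector service: %v", err)
        }
    }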
@@ -148,18 +149,39 @@ func newPodOnNode(podName, nodeName string, image string) *api.Pod {
     }
 }

-func createServeHostnamePodOnNode(c *client.Client, namespace, podName, nodeName string) error {
-    pod, err := c.Pods(namespace).Create(newPodOnNode(podName, nodeName, serveHostnameImage))
+func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
+    pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
     if err == nil {
         Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
     } else {
-        Logf("Failed to create pod %s on node %s: %s", podName, nodeName, err)
+        Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
     }
     return err
 }

-func newReplicationControllerWithNameSelector(name string, replicas int, image string) *api.ReplicationController {
+func rcByName(name string, replicas int, image string, labels map[string]string) *api.ReplicationController {
+    return rcByNameContainer(name, replicas, image, labels, api.Container{
+        Name:  name,
+        Image: image,
+    })
+}
+
+func rcByNamePort(name string, replicas int, image string, port int, labels map[string]string) *api.ReplicationController {
+    return rcByNameContainer(name, replicas, image, labels, api.Container{
+        Name:  name,
+        Image: image,
+        Ports: []api.ContainerPort{{ContainerPort: port}},
+    })
+}
+
+func rcByNameContainer(name string, replicas int, image string, labels map[string]string, c api.Container) *api.ReplicationController {
+    // Add "name": name to the labels, overwriting if it exists.
+    labels["name"] = name
     return &api.ReplicationController{
+        TypeMeta: api.TypeMeta{
+            Kind:       "ReplicationController",
+            APIVersion: latest.Version,
+        },
         ObjectMeta: api.ObjectMeta{
             Name: name,
         },
@@ -170,28 +192,24 @@ func newReplicationControllerWithNameSelector(name string, replicas int, image s
             },
             Template: &api.PodTemplateSpec{
                 ObjectMeta: api.ObjectMeta{
-                    Labels: map[string]string{"name": name},
+                    Labels: labels,
                 },
                 Spec: api.PodSpec{
-                    Containers: []api.Container{
-                        {
-                            Name:  name,
-                            Image: image,
-                            Ports: []api.ContainerPort{{ContainerPort: 9376}},
-                        },
-                    },
+                    Containers: []api.Container{c},
                 },
             },
         },
     }
 }

-func createServeHostnameReplicationController(c *client.Client, ns, name string, replicas int) (*api.ReplicationController, error) {
+// newRCByName creates a replication controller with a selector by name of name.
+func newRCByName(c *client.Client, ns, name string, replicas int) (*api.ReplicationController, error) {
     By(fmt.Sprintf("creating replication controller %s", name))
-    return c.ReplicationControllers(ns).Create(newReplicationControllerWithNameSelector(name, replicas, serveHostnameImage))
+    return c.ReplicationControllers(ns).Create(rcByNamePort(
+        name, replicas, serveHostnameImage, 9376, map[string]string{}))
 }

-func resizeReplicationController(c *client.Client, ns, name string, replicas int) error {
+func resizeRC(c *client.Client, ns, name string, replicas int) error {
     rc, err := c.ReplicationControllers(ns).Get(name)
     if err != nil {
         return err
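
A hypothetical call site for the new constructor family (the helper names, the serveHostnameImage constant, and port 9376 come from the diff; the controller name and the extra label are made up):

    // buildDemoControllers sketches how rcByName and rcByNamePort read after the
    // rename; both delegate to rcByNameContainer, which stamps labels["name"].
    func buildDemoControllers() {
        withPort := rcByNamePort("my-hostname-demo", 3, serveHostnameImage, 9376, map[string]string{})
        plain := rcByName("my-hostname-demo", 3, serveHostnameImage, map[string]string{"role": "demo"})
        Logf("built %s and %s", withPort.ObjectMeta.Name, plain.ObjectMeta.Name)
    }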
@@ -201,7 +219,7 @@ func resizeReplicationController(c *client.Client, ns, name string, replicas int
     return err
 }

-func waitForPodsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
+func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
     // List the pods, making sure we observe all the replicas.
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
     for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
@@ -218,7 +236,7 @@ func waitForPodsCreated(c *client.Client, ns, name string, replicas int) (*api.P
     return nil, fmt.Errorf("Pod name %s: Gave up waiting for %d pods to come up", name, replicas)
 }

-func waitForPodsRunning(c *client.Client, pods *api.PodList) []error {
+func podsRunning(c *client.Client, pods *api.PodList) []error {
     // Wait for the pods to enter the running state. Waiting loops until the pods
     // are running so non-running pods cause a timeout for this test.
     By("ensuring each pod is running")
@@ -233,24 +251,24 @@ func waitForPodsRunning(c *client.Client, pods *api.PodList) []error {
     return e
 }

-func verifyPodsResponding(c *client.Client, ns, name string, pods *api.PodList) error {
+func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error {
     By("trying to dial each unique pod")
     retryTimeout := 2 * time.Minute
     retryInterval := 5 * time.Second
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-    return wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, pods}.checkAllResponses)
+    return wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses)
 }

-func waitForPodsCreatedRunningResponding(c *client.Client, ns, name string, replicas int) error {
-    pods, err := waitForPodsCreated(c, ns, name, replicas)
+func verifyPods(c *client.Client, ns, name string, wantName bool, replicas int) error {
+    pods, err := podsCreated(c, ns, name, replicas)
     if err != nil {
         return err
     }
-    e := waitForPodsRunning(c, pods)
+    e := podsRunning(c, pods)
     if len(e) > 0 {
         return fmt.Errorf("Failed to wait for pods running: %v", e)
     }
-    err = verifyPodsResponding(c, ns, name, pods)
+    err = podsResponding(c, ns, name, wantName, pods)
     if err != nil {
         return err
     }
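
For readers unfamiliar with the wait package: podsResponding hands wait.Poll a condition that is retried every retryInterval until it reports done or retryTimeout passes. Here is a minimal sketch of that shape, assuming the usual func() (bool, error) condition signature; the condition body is a placeholder, not the real podResponseChecker:

    // examplePoll shows the polling contract used by podsResponding: return
    // (true, nil) to stop successfully, (false, nil) to keep retrying, or a
    // non-nil error to abort immediately.
    func examplePoll() error {
        allResponded := func() (bool, error) {
            // placeholder: the real checker dials every pod and, when wantName
            // is true, compares each reply against the pod's name
            return true, nil
        }
        return wait.Poll(5*time.Second, 2*time.Minute, allResponded)
    }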
@@ -331,7 +349,7 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
     waitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)

     By("verifying whether the pod from the unreachable node is recreated")
-    err := waitForPodsCreatedRunningResponding(c, ns, rcName, replicas)
+    err := verifyPods(c, ns, rcName, true, replicas)
     Expect(err).NotTo(HaveOccurred())

     // network traffic is unblocked in a defered function
@@ -372,10 +390,10 @@ var _ = Describe("Nodes", func() {
            return
        }
        By("restoring the original node instance group size")
-       if err := resizeNodeInstanceGroup(testContext.CloudConfig.NumNodes); err != nil {
+       if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil {
            Failf("Couldn't restore the original node instance group size: %v", err)
        }
-       if err := waitForNodeInstanceGroupSize(testContext.CloudConfig.NumNodes); err != nil {
+       if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil {
            Failf("Couldn't restore the original node instance group size: %v", err)
        }
        if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes); err != nil {
@@ -396,20 +414,20 @@ var _ = Describe("Nodes", func() {
        // The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
        name := "my-hostname-delete-node"
        replicas := testContext.CloudConfig.NumNodes
-       createServeHostnameReplicationController(c, ns, name, replicas)
-       err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
+       newRCByName(c, ns, name, replicas)
+       err := verifyPods(c, ns, name, true, replicas)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
-       err = resizeNodeInstanceGroup(replicas - 1)
+       err = resizeGroup(replicas - 1)
        Expect(err).NotTo(HaveOccurred())
-       err = waitForNodeInstanceGroupSize(replicas - 1)
+       err = waitForGroupSize(replicas - 1)
        Expect(err).NotTo(HaveOccurred())
        err = waitForClusterSize(c, replicas-1)
        Expect(err).NotTo(HaveOccurred())

        By("verifying whether the pods from the removed node are recreated")
-       err = waitForPodsCreatedRunningResponding(c, ns, name, replicas)
+       err = verifyPods(c, ns, name, true, replicas)
        Expect(err).NotTo(HaveOccurred())
    })

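
To summarize the node-removal case just shown, here is a condensed, hedged outline of the same flow as a single function; it reuses the helpers from this file, collapses the Gomega assertions into plain error returns, and is illustrative rather than a drop-in test:

    func exampleShrinkFlow(c *client.Client, ns string, replicas int) error {
        name := "my-hostname-delete-node"
        if _, err := newRCByName(c, ns, name, replicas); err != nil {
            return err
        }
        if err := verifyPods(c, ns, name, true, replicas); err != nil {
            return err
        }
        if err := resizeGroup(replicas - 1); err != nil { // shrink the instance group via gcloud
            return err
        }
        if err := waitForGroupSize(replicas - 1); err != nil { // gcloud reports the new size
            return err
        }
        if err := waitForClusterSize(c, replicas-1); err != nil { // apiserver sees one node fewer
            return err
        }
        // the RC reschedules the lost pod, so the full replica count should respond again
        return verifyPods(c, ns, name, true, replicas)
    }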
@@ -426,24 +444,24 @@ var _ = Describe("Nodes", func() {
        // Create a replication controller for a service that serves its hostname.
        // The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
        name := "my-hostname-add-node"
-       createServiceWithNameSelector(c, ns, name)
+       newSVCByName(c, ns, name)
        replicas := testContext.CloudConfig.NumNodes
-       createServeHostnameReplicationController(c, ns, name, replicas)
-       err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
+       newRCByName(c, ns, name, replicas)
+       err := verifyPods(c, ns, name, true, replicas)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
-       err = resizeNodeInstanceGroup(replicas + 1)
+       err = resizeGroup(replicas + 1)
        Expect(err).NotTo(HaveOccurred())
-       err = waitForNodeInstanceGroupSize(replicas + 1)
+       err = waitForGroupSize(replicas + 1)
        Expect(err).NotTo(HaveOccurred())
        err = waitForClusterSize(c, replicas+1)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))
-       err = resizeReplicationController(c, ns, name, replicas+1)
+       err = resizeRC(c, ns, name, replicas+1)
        Expect(err).NotTo(HaveOccurred())
-       err = waitForPodsCreatedRunningResponding(c, ns, name, replicas+1)
+       err = verifyPods(c, ns, name, true, replicas+1)
        Expect(err).NotTo(HaveOccurred())
    })
 })
@@ -472,10 +490,10 @@ var _ = Describe("Nodes", func() {
            // Create a replication controller for a service that serves its hostname.
            // The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
            name := "my-hostname-net"
-           createServiceWithNameSelector(c, ns, name)
+           newSVCByName(c, ns, name)
            replicas := testContext.CloudConfig.NumNodes
-           createServeHostnameReplicationController(c, ns, name, replicas)
-           err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
+           newRCByName(c, ns, name, replicas)
+           err := verifyPods(c, ns, name, true, replicas)
            Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

            By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -496,9 +514,9 @@ var _ = Describe("Nodes", func() {
            // increasing the RC size is not a valid way to test this
            // since we have no guarantees the pod will be scheduled on our node.
            additionalPod := "additionalpod"
-           err = createServeHostnamePodOnNode(c, ns, additionalPod, node.Name)
+           err = newPodOnNode(c, ns, additionalPod, node.Name)
            Expect(err).NotTo(HaveOccurred())
-           err = waitForPodsCreatedRunningResponding(c, ns, additionalPod, 1)
+           err = verifyPods(c, ns, additionalPod, true, 1)
            Expect(err).NotTo(HaveOccurred())

            // verify that it is really on the requested node