Fix disruptive tests for GKE regional clusters

Author: wojtekt
Date:   2018-04-05 13:34:59 +02:00
parent 98e89770c6
commit 4daac74de7
4 changed files with 45 additions and 20 deletions
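The pattern across the hunks below is the same: the tests previously sized themselves from framework.TestContext.CloudConfig.NumNodes, a static value taken from the test configuration. On a GKE regional cluster the node pool is replicated across the region's zones, so that configured count no longer matches the number of nodes actually registered with the API server. The tests now ask the cluster directly through framework.NumberOfRegisteredNodes, a helper introduced in one of the changed files not shown in this excerpt. As its definition is not included here, the following is only a minimal sketch of what such a helper could look like, assuming it simply lists nodes through the client (package layout and error wording are illustrative):

package framework

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// NumberOfRegisteredNodes returns how many nodes are currently
// registered with the API server, regardless of what the test
// configuration claims the cluster size should be.
// (Sketch only: the real helper's body is in a file not shown here.)
func NumberOfRegisteredNodes(c clientset.Interface) (int, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return 0, fmt.Errorf("failed to list nodes: %v", err)
	}
	return len(nodes.Items), nil
}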


@@ -112,9 +112,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		// Create a replication controller for a service that serves its hostname.
 		// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 		name := "my-hostname-delete-node"
-		replicas := int32(framework.TestContext.CloudConfig.NumNodes)
+		numNodes, err := framework.NumberOfRegisteredNodes(c)
+		Expect(err).NotTo(HaveOccurred())
+		replicas := int32(numNodes)
 		common.NewRCByName(c, ns, name, replicas, nil)
-		err := framework.VerifyPods(c, ns, name, true, replicas)
+		err = framework.VerifyPods(c, ns, name, true, replicas)
 		Expect(err).NotTo(HaveOccurred())
 
 		By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
@@ -140,9 +142,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 		name := "my-hostname-add-node"
 		common.NewSVCByName(c, ns, name)
-		replicas := int32(framework.TestContext.CloudConfig.NumNodes)
+		numNodes, err := framework.NumberOfRegisteredNodes(c)
+		Expect(err).NotTo(HaveOccurred())
+		replicas := int32(numNodes)
 		common.NewRCByName(c, ns, name, replicas, nil)
-		err := framework.VerifyPods(c, ns, name, true, replicas)
+		err = framework.VerifyPods(c, ns, name, true, replicas)
 		Expect(err).NotTo(HaveOccurred())
 
 		By(fmt.Sprintf("increasing cluster size to %d", replicas+1))


@@ -68,11 +68,12 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		// check must be identical to that call.
 		framework.SkipUnlessProviderIs("gce", "gke")
 		ps = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
-		numNodes = framework.TestContext.CloudConfig.NumNodes
+		var err error
+		numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
+		Expect(err).NotTo(HaveOccurred())
 		systemNamespace = metav1.NamespaceSystem
 
 		By("ensuring all nodes are ready")
-		var err error
 		originalNodeNames, err = framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, numNodes)
 		Expect(err).NotTo(HaveOccurred())
 		framework.Logf("Got the following nodes before restart: %v", originalNodeNames)
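
One detail worth noting in the first two hunks: because numNodes, err := framework.NumberOfRegisteredNodes(c) now declares err near the top of the test, the later VerifyPods call changes from err := to err =, since Go would otherwise reject redeclaring err in the same scope. Condensed, the shape shared by the two resize call sites (identifiers exactly as in the diff above):

numNodes, err := framework.NumberOfRegisteredNodes(c) // live count from the API server
Expect(err).NotTo(HaveOccurred())
replicas := int32(numNodes) // one pod per registered node
common.NewRCByName(c, ns, name, replicas, nil)
err = framework.VerifyPods(c, ns, name, true, replicas) // plain `=`: err is already declared
Expect(err).NotTo(HaveOccurred())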