diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index b49354ee8a9..767fe7fbf91 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -65,7 +65,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" "k8s.io/kubernetes/plugin/pkg/scheduler/factory" - "k8s.io/kubernetes/test/e2e" + e2e "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" diff --git a/docs/devel/writing-good-e2e-tests.md b/docs/devel/writing-good-e2e-tests.md index 54b700309f6..2cb0fe47abc 100644 --- a/docs/devel/writing-good-e2e-tests.md +++ b/docs/devel/writing-good-e2e-tests.md @@ -180,7 +180,7 @@ right thing. Here are a few pointers: -+ [E2e Framework](../../test/e2e/framework.go): ++ [E2e Framework](../../test/e2e/framework/framework.go): Familiarise yourself with this test framework and how to use it. Amongst others, it automatically creates uniquely named namespaces within which your tests can run to avoid name clashes, and reliably @@ -194,7 +194,7 @@ Here are a few pointers: should always use this framework. Trying other home-grown approaches to avoiding name clashes and resource leaks has proven to be a very bad idea. -+ [E2e utils library](../../test/e2e/util.go): ++ [E2e utils library](../../test/e2e/framework/util.go): This handy library provides tons of reusable code for a host of commonly needed test functionality, including waiting for resources to enter specified states, safely and consistently retrying failed diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index b75a511e82a..82caf87825f 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -79,7 +79,7 @@ pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj for all pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj. PID = 0 means self test/e2e/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"}, test/e2e/downwardapi_volume.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath}, -test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse) +test/e2e/es_cluster_logging.go: framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse) test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field. test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"] test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath), diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go index c14dfe9b242..6077046e0ac 100644 --- a/test/e2e/addon_update.go +++ b/test/e2e/addon_update.go @@ -27,6 +27,7 @@ import ( "golang.org/x/crypto/ssh" "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -188,18 +189,18 @@ type stringPair struct { data, fileName string } -var _ = KubeDescribe("Addon update", func() { +var _ = framework.KubeDescribe("Addon update", func() { var dir string var sshClient *ssh.Client - f := NewDefaultFramework("addon-update-test") + f := framework.NewDefaultFramework("addon-update-test") BeforeEach(func() { // This test requires: // - SSH master access // ... 
so the provider check should be identical to the intersection of // providers that provide those capabilities. - if !providerIs("gce") { + if !framework.ProviderIs("gce") { return } @@ -210,26 +211,26 @@ var _ = KubeDescribe("Addon update", func() { // Reduce the addon update intervals so that we have faster response // to changes in the addon directory. // do not use "service" command because it clears the environment variables - switch testContext.OSDistro { + switch framework.TestContext.OSDistro { case "debian": sshExecAndVerify(sshClient, "sudo TEST_ADDON_CHECK_INTERVAL_SEC=1 /etc/init.d/kube-addons restart") case "trusty": sshExecAndVerify(sshClient, "sudo initctl restart kube-addons TEST_ADDON_CHECK_INTERVAL_SEC=1") default: - Failf("Unsupported OS distro type %s", testContext.OSDistro) + framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro) } }) AfterEach(func() { if sshClient != nil { // restart addon_update with the default options - switch testContext.OSDistro { + switch framework.TestContext.OSDistro { case "debian": sshExec(sshClient, "sudo /etc/init.d/kube-addons restart") case "trusty": sshExec(sshClient, "sudo initctl restart kube-addons") default: - Failf("Unsupported OS distro type %s", testContext.OSDistro) + framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro) } sshClient.Close() } @@ -242,7 +243,7 @@ var _ = KubeDescribe("Addon update", func() { // - master access // ... so the provider check should be identical to the intersection of // providers that provide those capabilities. - SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") //these tests are long, so I squeezed several cases in one scenario Expect(sshClient).NotTo(BeNil()) @@ -337,20 +338,20 @@ var _ = KubeDescribe("Addon update", func() { }) func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) { - expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) + framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist bool) { - expectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) + framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } // TODO marekbiskup 2015-06-11: merge the ssh code into pkg/util/ssh.go after // kubernetes v1.0 is released. In particular the code of sshExec. func getMasterSSHClient() (*ssh.Client, error) { // Get a signer for the provider. 
- signer, err := getSigner(testContext.Provider) + signer, err := framework.GetSigner(framework.TestContext.Provider) if err != nil { - return nil, fmt.Errorf("error getting signer for provider %s: '%v'", testContext.Provider, err) + return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err) } config := &ssh.ClientConfig{ @@ -358,7 +359,7 @@ func getMasterSSHClient() (*ssh.Client, error) { Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, } - host := getMasterHost() + ":22" + host := framework.GetMasterHost() + ":22" client, err := ssh.Dial("tcp", host, config) if err != nil { return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err) @@ -373,7 +374,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) { } func sshExec(client *ssh.Client, cmd string) (string, string, int, error) { - Logf("Executing '%s' on %v", cmd, client.RemoteAddr()) + framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr()) session, err := client.NewSession() if err != nil { return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err) @@ -405,7 +406,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) { } func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error { - Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())) + framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())) session, err := sshClient.NewSession() if err != nil { return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err) diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go index fb7539fe06d..ee1ed0dae17 100644 --- a/test/e2e/autoscaling_utils.go +++ b/test/e2e/autoscaling_utils.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" ) @@ -58,7 +59,7 @@ rc.ConsumeCPU(300) type ResourceConsumer struct { name string kind string - framework *Framework + framework *framework.Framework cpu chan int mem chan int customMetric chan int @@ -72,15 +73,15 @@ type ResourceConsumer struct { requestSizeCustomMetric int } -func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer { +func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer { return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, framework) + dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f) } // TODO this still defaults to replication controller -func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer { +func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer { return newResourceConsumer(name, kindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds, - initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, framework) + initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f) } /* @@ -91,13 +92,13 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod */ func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, - requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer { + requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer { - runServiceAndWorkloadForResourceConsumer(framework.Client, framework.Namespace.Name, name, kind, replicas, cpuLimit, memLimit) + runServiceAndWorkloadForResourceConsumer(f.Client, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit) rc := &ResourceConsumer{ name: name, kind: kind, - framework: framework, + framework: f, cpu: make(chan int), mem: make(chan int), customMetric: make(chan int), @@ -121,19 +122,19 @@ func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTo // ConsumeCPU consumes given number of CPU func (rc *ResourceConsumer) ConsumeCPU(millicores int) { - Logf("RC %s: consume %v millicores in total", rc.name, millicores) + framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores) rc.cpu <- millicores } // ConsumeMem consumes given number of Mem func (rc *ResourceConsumer) ConsumeMem(megabytes int) { - Logf("RC %s: consume %v MB in total", rc.name, megabytes) + framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes) rc.mem <- megabytes } // ConsumeMem consumes given number of custom metric func (rc *ResourceConsumer) 
ConsumeCustomMetric(amount int) { - Logf("RC %s: consume custom metric %v in total", rc.name, amount) + framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount) rc.customMetric <- amount } @@ -145,13 +146,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() { for { select { case millicores := <-rc.cpu: - Logf("RC %s: consume %v millicores in total", rc.name, millicores) + framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores) if rc.requestSizeInMillicores != 0 { count = millicores / rc.requestSizeInMillicores } rest = millicores - count*rc.requestSizeInMillicores case <-time.After(sleepTime): - Logf("RC %s: sending %v requests to consume %v millicores each and 1 request to consume %v millicores", rc.name, count, rc.requestSizeInMillicores, rest) + framework.Logf("RC %s: sending %v requests to consume %v millicores each and 1 request to consume %v millicores", rc.name, count, rc.requestSizeInMillicores, rest) if count > 0 { rc.sendConsumeCPURequests(count, rc.requestSizeInMillicores, rc.consumptionTimeInSeconds) } @@ -173,13 +174,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() { for { select { case megabytes := <-rc.mem: - Logf("RC %s: consume %v MB in total", rc.name, megabytes) + framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes) if rc.requestSizeInMegabytes != 0 { count = megabytes / rc.requestSizeInMegabytes } rest = megabytes - count*rc.requestSizeInMegabytes case <-time.After(sleepTime): - Logf("RC %s: sending %v requests to consume %v MB each and 1 request to consume %v MB", rc.name, count, rc.requestSizeInMegabytes, rest) + framework.Logf("RC %s: sending %v requests to consume %v MB each and 1 request to consume %v MB", rc.name, count, rc.requestSizeInMegabytes, rest) if count > 0 { rc.sendConsumeMemRequests(count, rc.requestSizeInMegabytes, rc.consumptionTimeInSeconds) } @@ -201,13 +202,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() { for { select { case total := <-rc.customMetric: - Logf("RC %s: consume custom metric %v in total", rc.name, total) + framework.Logf("RC %s: consume custom metric %v in total", rc.name, total) if rc.requestSizeInMegabytes != 0 { count = total / rc.requestSizeCustomMetric } rest = total - count*rc.requestSizeCustomMetric case <-time.After(sleepTime): - Logf("RC %s: sending %v requests to consume %v custom metric each and 1 request to consume %v", + framework.Logf("RC %s: sending %v requests to consume %v custom metric each and 1 request to consume %v", rc.name, count, rc.requestSizeCustomMetric, rest) if count > 0 { rc.sendConsumeCustomMetric(count, rc.requestSizeCustomMetric, rc.consumptionTimeInSeconds) @@ -243,36 +244,36 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(requests, delta, durationSec // sendOneConsumeCPURequest sends POST request for cpu consumption func (rc *ResourceConsumer) sendOneConsumeCPURequest(millicores int, durationSec int) { defer GinkgoRecover() - proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) - expectNoError(err) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + framework.ExpectNoError(err) _, err = proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.name). Suffix("ConsumeCPU"). Param("millicores", strconv.Itoa(millicores)). Param("durationSec", strconv.Itoa(durationSec)). 
DoRaw() - expectNoError(err) + framework.ExpectNoError(err) } // sendOneConsumeMemRequest sends POST request for memory consumption func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec int) { defer GinkgoRecover() - proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) - expectNoError(err) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + framework.ExpectNoError(err) _, err = proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.name). Suffix("ConsumeMem"). Param("megabytes", strconv.Itoa(megabytes)). Param("durationSec", strconv.Itoa(durationSec)). DoRaw() - expectNoError(err) + framework.ExpectNoError(err) } // sendOneConsumeCustomMetric sends POST request for custom metric consumption func (rc *ResourceConsumer) sendOneConsumeCustomMetric(delta int, durationSec int) { defer GinkgoRecover() - proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) - expectNoError(err) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + framework.ExpectNoError(err) _, err = proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.name). Suffix("BumpMetric"). @@ -280,34 +281,34 @@ func (rc *ResourceConsumer) sendOneConsumeCustomMetric(delta int, durationSec in Param("delta", strconv.Itoa(delta)). Param("durationSec", strconv.Itoa(durationSec)). DoRaw() - expectNoError(err) + framework.ExpectNoError(err) } func (rc *ResourceConsumer) GetReplicas() int { switch rc.kind { case kindRC: replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name) - expectNoError(err) + framework.ExpectNoError(err) if replicationController == nil { - Failf(rcIsNil) + framework.Failf(rcIsNil) } return replicationController.Status.Replicas case kindDeployment: deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name) - expectNoError(err) + framework.ExpectNoError(err) if deployment == nil { - Failf(deploymentIsNil) + framework.Failf(deploymentIsNil) } return deployment.Status.Replicas case kindReplicaSet: rs, err := rc.framework.Client.ReplicaSets(rc.framework.Namespace.Name).Get(rc.name) - expectNoError(err) + framework.ExpectNoError(err) if rs == nil { - Failf(rsIsNil) + framework.Failf(rsIsNil) } return rs.Status.Replicas default: - Failf(invalidKind) + framework.Failf(invalidKind) } return 0 } @@ -316,24 +317,24 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) { timeout := 10 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { if desiredReplicas == rc.GetReplicas() { - Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas) + framework.Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas) return } else { - Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas) + framework.Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas) } } - Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas) + framework.Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas) } func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, timeout time.Duration) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * 
time.Second) { actual := rc.GetReplicas() if desiredReplicas != actual { - Failf("Number of replicas has changed: expected %v, got %v", desiredReplicas, actual) + framework.Failf("Number of replicas has changed: expected %v, got %v", desiredReplicas, actual) } - Logf("Number of replicas is as expected") + framework.Logf("Number of replicas is as expected") } - Logf("Number of replicas was stable over %v", timeout) + framework.Logf("Number of replicas was stable over %v", timeout) } func (rc *ResourceConsumer) CleanUp() { @@ -343,8 +344,8 @@ func (rc *ResourceConsumer) CleanUp() { rc.stopCustomMetric <- 0 // Wait some time to ensure all child goroutines are finished. time.Sleep(10 * time.Second) - expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name)) - expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name)) + framework.ExpectNoError(framework.DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name)) + framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name)) } func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) { @@ -364,9 +365,9 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s }, }, }) - expectNoError(err) + framework.ExpectNoError(err) - rcConfig := RCConfig{ + rcConfig := framework.RCConfig{ Client: c, Image: resourceConsumerImage, Name: name, @@ -381,22 +382,22 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s switch kind { case kindRC: - expectNoError(RunRC(rcConfig)) + framework.ExpectNoError(framework.RunRC(rcConfig)) break case kindDeployment: - dpConfig := DeploymentConfig{ + dpConfig := framework.DeploymentConfig{ rcConfig, } - expectNoError(RunDeployment(dpConfig)) + framework.ExpectNoError(framework.RunDeployment(dpConfig)) break case kindReplicaSet: - rsConfig := ReplicaSetConfig{ + rsConfig := framework.ReplicaSetConfig{ rcConfig, } - expectNoError(RunReplicaSet(rsConfig)) + framework.ExpectNoError(framework.RunReplicaSet(rsConfig)) break default: - Failf(invalidKind) + framework.Failf(invalidKind) } // Make sure endpoints are propagated. diff --git a/test/e2e/batch_v1_jobs.go b/test/e2e/batch_v1_jobs.go index b0f41cc183f..b09e611e286 100644 --- a/test/e2e/batch_v1_jobs.go +++ b/test/e2e/batch_v1_jobs.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -43,8 +44,8 @@ const ( v1JobSelectorKey = "job-name" ) -var _ = KubeDescribe("V1Job", func() { - f := NewDefaultFramework("v1job") +var _ = framework.KubeDescribe("V1Job", func() { + f := framework.NewDefaultFramework("v1job") parallelism := 2 completions := 4 lotsOfFailures := 5 // more than completions @@ -105,7 +106,7 @@ var _ = KubeDescribe("V1Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring job shows many failures") - err = wait.Poll(poll, v1JobTimeout, func() (bool, error) { + err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { curr, err := f.Client.Batch().Jobs(f.Namespace.Name).Get(job.Name) if err != nil { return false, err @@ -274,7 +275,7 @@ func deleteV1Job(c *client.Client, ns, name string) error { // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. 
func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName})) - return wait.Poll(poll, v1JobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} pods, err := c.Pods(ns).List(options) if err != nil { @@ -292,7 +293,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i // Wait for job to reach completions. func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int) error { - return wait.Poll(poll, v1JobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { return false, err @@ -303,7 +304,7 @@ func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int) e // Wait for job fail. func waitForV1JobFail(c *client.Client, ns, jobName string) error { - return wait.Poll(poll, v1JobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { return false, err diff --git a/test/e2e/cadvisor.go b/test/e2e/cadvisor.go index 6c95a3e8e02..3ab90a5278e 100644 --- a/test/e2e/cadvisor.go +++ b/test/e2e/cadvisor.go @@ -22,6 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) @@ -31,9 +32,9 @@ const ( sleepDuration = 10 * time.Second ) -var _ = KubeDescribe("Cadvisor", func() { +var _ = framework.KubeDescribe("Cadvisor", func() { - f := NewDefaultFramework("cadvisor") + f := framework.NewDefaultFramework("cadvisor") It("should be healthy on every node.", func() { CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute) @@ -44,7 +45,7 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { // It should be OK to list unschedulable Nodes here. By("getting list of nodes") nodeList, err := c.Nodes().List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) var errors []error retries := maxRetries for { @@ -65,8 +66,8 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { if retries--; retries <= 0 { break } - Logf("failed to retrieve kubelet stats -\n %v", errors) + framework.Logf("failed to retrieve kubelet stats -\n %v", errors) time.Sleep(sleepDuration) } - Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors) + framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors) } diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go index 766631a53dd..052e0a72cd9 100644 --- a/test/e2e/cluster_size_autoscaling.go +++ b/test/e2e/cluster_size_autoscaling.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -37,16 +38,16 @@ const ( // run by default. // // These tests take ~20 minutes to run each. 
-var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] [Slow]", func() { - f := NewDefaultFramework("autoscaling") +var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] [Slow]", func() { + f := framework.NewDefaultFramework("autoscaling") var nodeCount int var coresPerNode int var memCapacityMb int BeforeEach(func() { - SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") - nodes := ListSchedulableNodesOrDie(f.Client) + nodes := framework.ListSchedulableNodesOrDie(f.Client) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU] @@ -64,23 +65,23 @@ var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] // Consume 50% CPU rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0) - err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout) + err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout) for _, rc := range rcs { rc.CleanUp() } - expectNoError(err) + framework.ExpectNoError(err) - expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) }) It("Should scale cluster size based on cpu reservation", func() { setUpAutoscaler("cpu/node_reservation", 0.5, nodeCount, nodeCount+1) ReserveCpu(f, "cpu-reservation", 600*nodeCount*coresPerNode) - expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)) - expectNoError(DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation")) - expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) + framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation")) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) }) It("Should scale cluster size based on memory utilization", func() { @@ -89,23 +90,23 @@ var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] // Consume 60% of total memory capacity megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode) rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica) - err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout) + err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout) for _, rc := range rcs { rc.CleanUp() } - expectNoError(err) + framework.ExpectNoError(err) - expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) }) It("Should scale cluster size based on memory reservation", func() { setUpAutoscaler("memory/node_reservation", 0.5, nodeCount, nodeCount+1) ReserveMemory(f, "memory-reservation", nodeCount*memCapacityMb*6/10) - expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)) - expectNoError(DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")) - expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) + framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")) + framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout)) }) }) @@ -113,17 +114,17 @@ func setUpAutoscaler(metric string, 
target float64, min, max int) { // TODO integrate with kube-up.sh script once it will support autoscaler setup. By("Setting up autoscaler to scale based on " + metric) out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "set-autoscaling", - testContext.CloudConfig.NodeInstanceGroup, - "--project="+testContext.CloudConfig.ProjectID, - "--zone="+testContext.CloudConfig.Zone, + framework.TestContext.CloudConfig.NodeInstanceGroup, + "--project="+framework.TestContext.CloudConfig.ProjectID, + "--zone="+framework.TestContext.CloudConfig.Zone, "--custom-metric-utilization=metric=custom.cloudmonitoring.googleapis.com/kubernetes.io/"+metric+fmt.Sprintf(",utilization-target=%v", target)+",utilization-target-type=GAUGE", fmt.Sprintf("--min-num-replicas=%v", min), fmt.Sprintf("--max-num-replicas=%v", max), ).CombinedOutput() - expectNoError(err, "Output: "+string(out)) + framework.ExpectNoError(err, "Output: "+string(out)) } -func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer { +func createConsumingRCs(f *framework.Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer { var res []*ResourceConsumer for i := 1; i <= count; i++ { name := fmt.Sprintf("%s-%d", name, i) @@ -135,16 +136,16 @@ func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerR func cleanUpAutoscaler() { By("Removing autoscaler") out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "stop-autoscaling", - testContext.CloudConfig.NodeInstanceGroup, - "--project="+testContext.CloudConfig.ProjectID, - "--zone="+testContext.CloudConfig.Zone, + framework.TestContext.CloudConfig.NodeInstanceGroup, + "--project="+framework.TestContext.CloudConfig.ProjectID, + "--zone="+framework.TestContext.CloudConfig.Zone, ).CombinedOutput() - expectNoError(err, "Output: "+string(out)) + framework.ExpectNoError(err, "Output: "+string(out)) } -func ReserveCpu(f *Framework, id string, millicores int) { +func ReserveCpu(f *framework.Framework, id string, millicores int) { By(fmt.Sprintf("Running RC which reserves %v millicores", millicores)) - config := &RCConfig{ + config := &framework.RCConfig{ Client: f.Client, Name: id, Namespace: f.Namespace.Name, @@ -153,12 +154,12 @@ func ReserveCpu(f *Framework, id string, millicores int) { Replicas: millicores / 100, CpuRequest: 100, } - expectNoError(RunRC(*config)) + framework.ExpectNoError(framework.RunRC(*config)) } -func ReserveMemory(f *Framework, id string, megabytes int) { +func ReserveMemory(f *framework.Framework, id string, megabytes int) { By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes)) - config := &RCConfig{ + config := &framework.RCConfig{ Client: f.Client, Name: id, Namespace: f.Namespace.Name, @@ -167,5 +168,5 @@ func ReserveMemory(f *Framework, id string, megabytes int) { Replicas: megabytes / 500, MemRequest: 500 * 1024 * 1024, } - expectNoError(RunRC(*config)) + framework.ExpectNoError(framework.RunRC(*config)) } diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index c4324048b04..8024d66e6f7 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -39,7 +40,7 @@ import ( // realVersion turns a version constant s into a version string deployable on // GKE. See hack/get-build.sh for more information. func realVersion(s string) (string, error) { - v, _, err := runCmd(path.Join(testContext.RepoRoot, "hack/get-build.sh"), "-v", s) + v, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s) if err != nil { return v, err } @@ -49,46 +50,46 @@ func realVersion(s string) (string, error) { // The following upgrade functions are passed into the framework below and used // to do the actual upgrades. var masterUpgrade = func(v string) error { - switch testContext.Provider { + switch framework.TestContext.Provider { case "gce": return masterUpgradeGCE(v) case "gke": return masterUpgradeGKE(v) default: - return fmt.Errorf("masterUpgrade() is not implemented for provider %s", testContext.Provider) + return fmt.Errorf("masterUpgrade() is not implemented for provider %s", framework.TestContext.Provider) } } func masterUpgradeGCE(rawV string) error { v := "v" + rawV - _, _, err := runCmd(path.Join(testContext.RepoRoot, "cluster/gce/upgrade.sh"), "-M", v) + _, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-M", v) return err } func masterUpgradeGKE(v string) error { - Logf("Upgrading master to %q", v) + framework.Logf("Upgrading master to %q", v) _, _, err := runCmd("gcloud", "container", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), - fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), + fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone), "clusters", "upgrade", - testContext.CloudConfig.Cluster, + framework.TestContext.CloudConfig.Cluster, "--master", fmt.Sprintf("--cluster-version=%s", v), "--quiet") return err } -var nodeUpgrade = func(f *Framework, replicas int, v string) error { +var nodeUpgrade = func(f *framework.Framework, replicas int, v string) error { // Perform the upgrade. var err error - switch testContext.Provider { + switch framework.TestContext.Provider { case "gce": err = nodeUpgradeGCE(v) case "gke": err = nodeUpgradeGKE(v) default: - err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", testContext.Provider) + err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider) } if err != nil { return err @@ -98,12 +99,12 @@ var nodeUpgrade = func(f *Framework, replicas int, v string) error { // // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in // GKE; the operation shouldn't return until they all are. 
- Logf("Waiting up to %v for all nodes to be ready after the upgrade", restartNodeReadyAgainTimeout) - if _, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, testContext.CloudConfig.NumNodes); err != nil { + framework.Logf("Waiting up to %v for all nodes to be ready after the upgrade", restartNodeReadyAgainTimeout) + if _, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, framework.TestContext.CloudConfig.NumNodes); err != nil { return err } - Logf("Waiting up to %v for all pods to be running and ready after the upgrade", restartPodReadyAgainTimeout) - return waitForPodsRunningReady(f.Namespace.Name, replicas, restartPodReadyAgainTimeout) + framework.Logf("Waiting up to %v for all pods to be running and ready after the upgrade", restartPodReadyAgainTimeout) + return framework.WaitForPodsRunningReady(f.Namespace.Name, replicas, restartPodReadyAgainTimeout) } func nodeUpgradeGCE(rawV string) error { @@ -111,21 +112,21 @@ func nodeUpgradeGCE(rawV string) error { // would trigger a node update; right now it's very different. v := "v" + rawV - Logf("Getting the node template before the upgrade") + framework.Logf("Getting the node template before the upgrade") tmplBefore, err := migTemplate() if err != nil { return fmt.Errorf("error getting the node template before the upgrade: %v", err) } - Logf("Preparing node upgrade by creating new instance template for %q", v) - stdout, _, err := runCmd(path.Join(testContext.RepoRoot, "cluster/gce/upgrade.sh"), "-P", v) + framework.Logf("Preparing node upgrade by creating new instance template for %q", v) + stdout, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-P", v) if err != nil { cleanupNodeUpgradeGCE(tmplBefore) return fmt.Errorf("error preparing node upgrade: %v", err) } tmpl := strings.TrimSpace(stdout) - Logf("Performing a node upgrade to %q; waiting at most %v per node", tmpl, restartPerNodeTimeout) + framework.Logf("Performing a node upgrade to %q; waiting at most %v per node", tmpl, restartPerNodeTimeout) if err := migRollingUpdate(tmpl, restartPerNodeTimeout); err != nil { cleanupNodeUpgradeGCE(tmplBefore) return fmt.Errorf("error doing node upgrade via a migRollingUpdate to %s: %v", tmpl, err) @@ -134,42 +135,42 @@ func nodeUpgradeGCE(rawV string) error { } func cleanupNodeUpgradeGCE(tmplBefore string) { - Logf("Cleaning up any unused node templates") + framework.Logf("Cleaning up any unused node templates") tmplAfter, err := migTemplate() if err != nil { - Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore) + framework.Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore) return } if tmplBefore == tmplAfter { // The node upgrade failed so there's no need to delete // anything. 
- Logf("Node template %s is still in use; not cleaning up", tmplBefore) + framework.Logf("Node template %s is still in use; not cleaning up", tmplBefore) return } - Logf("Deleting node template %s", tmplBefore) + framework.Logf("Deleting node template %s", tmplBefore) if _, _, err := retryCmd("gcloud", "compute", "instance-templates", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), "delete", tmplBefore); err != nil { - Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err) - Logf("May have leaked instance template %q", tmplBefore) + framework.Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err) + framework.Logf("May have leaked instance template %q", tmplBefore) } } func nodeUpgradeGKE(v string) error { - Logf("Upgrading nodes to %q", v) + framework.Logf("Upgrading nodes to %q", v) _, _, err := runCmd("gcloud", "container", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), - fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), + fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone), "clusters", "upgrade", - testContext.CloudConfig.Cluster, + framework.TestContext.CloudConfig.Cluster, fmt.Sprintf("--cluster-version=%s", v), "--quiet") return err } -var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() { +var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() { svcName, replicas := "baz", 2 var rcName, ip, v string @@ -179,14 +180,14 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() { // The version is determined once at the beginning of the test so that // the master and nodes won't be skewed if the value changes during the // test. 
- By(fmt.Sprintf("Getting real version for %q", testContext.UpgradeTarget)) + By(fmt.Sprintf("Getting real version for %q", framework.TestContext.UpgradeTarget)) var err error - v, err = realVersion(testContext.UpgradeTarget) - expectNoError(err) - Logf("Version for %q is %q", testContext.UpgradeTarget, v) + v, err = realVersion(framework.TestContext.UpgradeTarget) + framework.ExpectNoError(err) + framework.Logf("Version for %q is %q", framework.TestContext.UpgradeTarget, v) }) - f := NewDefaultFramework("cluster-upgrade") + f := framework.NewDefaultFramework("cluster-upgrade") var w *ServiceTestFixture BeforeEach(func() { By("Setting up the service, RC, and pods") @@ -202,10 +203,10 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() { Expect(err).NotTo(HaveOccurred()) ingresses := result.Status.LoadBalancer.Ingress if len(ingresses) != 1 { - Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result) + framework.Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result) } ingress = ingresses[0] - Logf("Got load balancer ingress point %v", ingress) + framework.Logf("Got load balancer ingress point %v", ingress) ip = ingress.IP if ip == "" { ip = ingress.Hostname @@ -222,98 +223,98 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() { w.Cleanup() }) - KubeDescribe("master upgrade", func() { + framework.KubeDescribe("master upgrade", func() { It("should maintain responsive services [Feature:MasterUpgrade]", func() { By("Validating cluster before master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a master upgrade") testUpgrade(ip, v, masterUpgrade) By("Checking master version") - expectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.Client, v)) By("Validating cluster after master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) }) }) - KubeDescribe("node upgrade", func() { + framework.KubeDescribe("node upgrade", func() { It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() { By("Validating cluster before node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a node upgrade") // Circumnavigate testUpgrade, since services don't necessarily stay up. 
- Logf("Starting upgrade") - expectNoError(nodeUpgrade(f, replicas, v)) - Logf("Upgrade complete") + framework.Logf("Starting upgrade") + framework.ExpectNoError(nodeUpgrade(f, replicas, v)) + framework.Logf("Upgrade complete") By("Checking node versions") - expectNoError(checkNodesVersions(f.Client, v)) + framework.ExpectNoError(checkNodesVersions(f.Client, v)) By("Validating cluster after node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) }) It("should maintain responsive services [Feature:ExperimentalNodeUpgrade]", func() { By("Validating cluster before node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a node upgrade") testUpgrade(ip, v, func(v string) error { return nodeUpgrade(f, replicas, v) }) By("Checking node versions") - expectNoError(checkNodesVersions(f.Client, v)) + framework.ExpectNoError(checkNodesVersions(f.Client, v)) By("Validating cluster after node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) }) }) - KubeDescribe("cluster upgrade", func() { + framework.KubeDescribe("cluster upgrade", func() { It("should maintain responsive services [Feature:ClusterUpgrade]", func() { By("Validating cluster before master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a master upgrade") testUpgrade(ip, v, masterUpgrade) By("Checking master version") - expectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.Client, v)) By("Validating cluster after master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Validating cluster before node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a node upgrade") // Circumnavigate testUpgrade, since services don't necessarily stay up. 
- Logf("Starting upgrade") - expectNoError(nodeUpgrade(f, replicas, v)) - Logf("Upgrade complete") + framework.Logf("Starting upgrade") + framework.ExpectNoError(nodeUpgrade(f, replicas, v)) + framework.Logf("Upgrade complete") By("Checking node versions") - expectNoError(checkNodesVersions(f.Client, v)) + framework.ExpectNoError(checkNodesVersions(f.Client, v)) By("Validating cluster after node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) }) It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() { By("Validating cluster before master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a master upgrade") testUpgrade(ip, v, masterUpgrade) By("Checking master version") - expectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.Client, v)) By("Validating cluster after master upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Validating cluster before node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) By("Performing a node upgrade") testUpgrade(ip, v, func(v string) error { return nodeUpgrade(f, replicas, v) }) By("Checking node versions") - expectNoError(checkNodesVersions(f.Client, v)) + framework.ExpectNoError(checkNodesVersions(f.Client, v)) By("Validating cluster after node upgrade") - expectNoError(validate(f, svcName, rcName, ingress, replicas)) + framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas)) }) }) }) func testUpgrade(ip, v string, upF func(v string) error) { - Logf("Starting async validation") + framework.Logf("Starting async validation") httpClient := http.Client{Timeout: 2 * time.Second} done := make(chan struct{}, 1) // Let's make sure we've finished the heartbeat before shutting things down. @@ -323,14 +324,14 @@ func testUpgrade(ip, v string, upF func(v string) error) { wg.Add(1) defer wg.Done() - if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) { + if err := wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { r, err := httpClient.Get("http://" + ip) if err != nil { - Logf("Error reaching %s: %v", ip, err) + framework.Logf("Error reaching %s: %v", ip, err) return false, nil } if r.StatusCode < http.StatusOK || r.StatusCode >= http.StatusNotFound { - Logf("Bad response; status: %d, response: %v", r.StatusCode, r) + framework.Logf("Bad response; status: %d, response: %v", r.StatusCode, r) return false, nil } return true, nil @@ -340,17 +341,17 @@ func testUpgrade(ip, v string, upF func(v string) error) { // a failure is very confusing to track down because from the logs // everything looks fine. 
msg := fmt.Sprintf("Failed to contact service during upgrade: %v", err) - Logf(msg) - Failf(msg) + framework.Logf(msg) + framework.Failf(msg) } }, 200*time.Millisecond, done) - Logf("Starting upgrade") - expectNoError(upF(v)) + framework.Logf("Starting upgrade") + framework.ExpectNoError(upF(v)) done <- struct{}{} - Logf("Stopping async validation") + framework.Logf("Stopping async validation") wg.Wait() - Logf("Upgrade complete") + framework.Logf("Upgrade complete") } func checkMasterVersion(c *client.Client, want string) error { @@ -366,12 +367,12 @@ func checkMasterVersion(c *client.Client, want string) error { return fmt.Errorf("master had kube-apiserver version %s which does not start with %s", got, want) } - Logf("Master is at version %s", want) + framework.Logf("Master is at version %s", want) return nil } func checkNodesVersions(c *client.Client, want string) error { - l := ListSchedulableNodesOrDie(c) + l := framework.ListSchedulableNodesOrDie(c) for _, n := range l.Items { // We do prefix trimming and then matching because: // want looks like: 0.19.3-815-g50e67d4 @@ -390,15 +391,15 @@ func checkNodesVersions(c *client.Client, want string) error { return nil } -// retryCmd runs cmd using args and retries it for up to singleCallTimeout if +// retryCmd runs cmd using args and retries it for up to framework.SingleCallTimeout if // it returns an error. It returns stdout and stderr. func retryCmd(command string, args ...string) (string, string, error) { var err error stdout, stderr := "", "" - wait.Poll(poll, singleCallTimeout, func() (bool, error) { + wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { stdout, stderr, err = runCmd(command, args...) if err != nil { - Logf("Got %v", err) + framework.Logf("Got %v", err) return false, nil } return true, nil @@ -412,7 +413,7 @@ func retryCmd(command string, args ...string) (string, string, error) { // TODO(ihmccreery) This function should either be moved into util.go or // removed; other e2e's use bare exe.Command. func runCmd(command string, args ...string) (string, string, error) { - Logf("Running %s %v", command, args) + framework.Logf("Running %s %v", command, args) var bout, berr bytes.Buffer cmd := exec.Command(command, args...) // We also output to the OS stdout/stderr to aid in debugging in case cmd @@ -428,8 +429,8 @@ func runCmd(command string, args ...string) (string, string, error) { return stdout, stderr, nil } -func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error { - Logf("Beginning cluster validation") +func validate(f *framework.Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error { + framework.Logf("Beginning cluster validation") // Verify RC. rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(api.ListOptions{}) if err != nil { @@ -443,7 +444,7 @@ func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBala } // Verify pods. - if err := verifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil { + if err := framework.VerifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil { return fmt.Errorf("failed to find %d %q pods: %v", podsWant, rcNameWant, err) } @@ -458,7 +459,7 @@ func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBala // TODO(mikedanese): Make testLoadBalancerReachable return an error. 
testLoadBalancerReachable(ingress, 80) - Logf("Cluster validation succeeded") + framework.Logf("Cluster validation succeeded") return nil } @@ -486,15 +487,15 @@ func migTemplate() (string, error) { var errLast error var templ string key := "instanceTemplate" - if wait.Poll(poll, singleCallTimeout, func() (bool, error) { + if wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { // TODO(mikedanese): make this hit the compute API directly instead of // shelling out to gcloud. // An `instance-groups managed describe` call outputs what we want to stdout. output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), "describe", - fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), - testContext.CloudConfig.NodeInstanceGroup) + fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone), + framework.TestContext.CloudConfig.NodeInstanceGroup) if err != nil { errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err) return false, nil @@ -503,10 +504,10 @@ func migTemplate() (string, error) { // The 'describe' call probably succeeded; parse the output and try to // find the line that looks like "instanceTemplate: url/to/" and // return . - if val := parseKVLines(output, key); len(val) > 0 { + if val := framework.ParseKVLines(output, key); len(val) > 0 { url := strings.Split(val, "/") templ = url[len(url)-1] - Logf("MIG group %s using template: %s", testContext.CloudConfig.NodeInstanceGroup, templ) + framework.Logf("MIG group %s using template: %s", framework.TestContext.CloudConfig.NodeInstanceGroup, templ) return true, nil } errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output) @@ -524,7 +525,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) { var errLast error var id string prefix, suffix := "Started [", "]." - if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) { + if err := wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { // TODO(mikedanese): make this hit the compute API directly instead of // shelling out to gcloud. // NOTE(mikedanese): If you are changing this gcloud command, update @@ -532,11 +533,11 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) { // A `rolling-updates start` call outputs what we want to stderr. _, output, err := retryCmd("gcloud", "alpha", "compute", "rolling-updates", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), - fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), + fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone), "start", // Required args. - fmt.Sprintf("--group=%s", testContext.CloudConfig.NodeInstanceGroup), + fmt.Sprintf("--group=%s", framework.TestContext.CloudConfig.NodeInstanceGroup), fmt.Sprintf("--template=%s", templ), // Optional args to fine-tune behavior. 
fmt.Sprintf("--instance-startup-timeout=%ds", int(nt.Seconds())), @@ -560,7 +561,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) { } url := strings.Split(strings.TrimSuffix(strings.TrimPrefix(line, prefix), suffix), "/") id = url[len(url)-1] - Logf("Started MIG rolling update; ID: %s", id) + framework.Logf("Started MIG rolling update; ID: %s", id) return true, nil } errLast = fmt.Errorf("couldn't find line like '%s ... %s' in output to MIG rolling-update start. Output: %s", @@ -578,42 +579,42 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) { func migRollingUpdatePoll(id string, nt time.Duration) error { // Two keys and a val. status, progress, done := "status", "statusMessage", "ROLLED_OUT" - start, timeout := time.Now(), nt*time.Duration(testContext.CloudConfig.NumNodes) + start, timeout := time.Now(), nt*time.Duration(framework.TestContext.CloudConfig.NumNodes) var errLast error - Logf("Waiting up to %v for MIG rolling update to complete.", timeout) + framework.Logf("Waiting up to %v for MIG rolling update to complete.", timeout) if wait.Poll(restartPoll, timeout, func() (bool, error) { // A `rolling-updates describe` call outputs what we want to stdout. output, _, err := retryCmd("gcloud", "alpha", "compute", "rolling-updates", - fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), - fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), + fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), + fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone), "describe", id) if err != nil { errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err) - Logf("%v", errLast) + framework.Logf("%v", errLast) return false, nil } // The 'describe' call probably succeeded; parse the output and try to // find the line that looks like "status: " and see whether it's // done. - Logf("Waiting for MIG rolling update: %s (%v elapsed)", - parseKVLines(output, progress), time.Since(start)) - if st := parseKVLines(output, status); st == done { + framework.Logf("Waiting for MIG rolling update: %s (%v elapsed)", + framework.ParseKVLines(output, progress), time.Since(start)) + if st := framework.ParseKVLines(output, status); st == done { return true, nil } return false, nil }) != nil { return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. 
Last error: %v", timeout, errLast) } - Logf("MIG rolling update complete after %v", time.Since(start)) + framework.Logf("MIG rolling update complete after %v", time.Since(start)) return nil } func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool { loadBalancerLagTimeout := loadBalancerLagTimeoutDefault - if providerIs("aws") { + if framework.ProviderIs("aws") { loadBalancerLagTimeout = loadBalancerLagTimeoutAWS } return testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeout) @@ -637,7 +638,7 @@ func conditionFuncDecorator(ip string, port int, fn func(string, int, string, st func testReachableInTime(testFunc wait.ConditionFunc, timeout time.Duration) bool { By(fmt.Sprintf("Waiting up to %v", timeout)) - err := wait.PollImmediate(poll, timeout, testFunc) + err := wait.PollImmediate(framework.Poll, timeout, testFunc) if err != nil { Expect(err).NotTo(HaveOccurred(), "Error waiting") return false @@ -655,14 +656,14 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string) for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) { service, err := c.Services(namespace).Get(serviceName) if err != nil { - Logf("Get service failed, ignoring for 5s: %v", err) + framework.Logf("Get service failed, ignoring for 5s: %v", err) continue } if len(service.Status.LoadBalancer.Ingress) > 0 { return service, nil } if i%5 == 0 { - Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start)) + framework.Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start)) } i++ } diff --git a/test/e2e/configmap.go b/test/e2e/configmap.go index 001e1fb3268..205a4c2f375 100644 --- a/test/e2e/configmap.go +++ b/test/e2e/configmap.go @@ -22,14 +22,15 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("ConfigMap", func() { +var _ = framework.KubeDescribe("ConfigMap", func() { - f := NewDefaultFramework("configmap") + f := framework.NewDefaultFramework("configmap") It("should be consumable from pods in volume [Conformance]", func() { doConfigMapE2EWithoutMappings(f, 0, 0) @@ -82,12 +83,12 @@ var _ = KubeDescribe("ConfigMap", func() { defer func() { By("Cleaning up the configMap") if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { - Failf("unable to delete configMap %v: %v", configMap.Name, err) + framework.Failf("unable to delete configMap %v: %v", configMap.Name, err) } }() var err error if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - Failf("unable to create test configMap %s: %v", configMap.Name, err) + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &api.Pod{ @@ -133,13 +134,13 @@ var _ = KubeDescribe("ConfigMap", func() { _, err = f.Client.Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) - expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) pollLogs := func() (string, error) { - return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) } - Eventually(pollLogs, podLogTimeout, poll).Should(ContainSubstring("value-1")) + Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update @@ -148,7 +149,7 @@ var _ = KubeDescribe("ConfigMap", func() { Expect(err).NotTo(HaveOccurred()) By("waiting to observe update in volume") - Eventually(pollLogs, podLogTimeout, poll).Should(ContainSubstring("value-2")) + Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2")) }) It("should be consumable via environment variable [Conformance]", func() { @@ -158,12 +159,12 @@ var _ = KubeDescribe("ConfigMap", func() { defer func() { By("Cleaning up the configMap") if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { - Failf("unable to delete configMap %v: %v", configMap.Name, err) + framework.Failf("unable to delete configMap %v: %v", configMap.Name, err) } }() var err error if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - Failf("unable to create test configMap %s: %v", configMap.Name, err) + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &api.Pod{ @@ -195,13 +196,13 @@ var _ = KubeDescribe("ConfigMap", func() { }, } - testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ + framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{ "CONFIG_DATA_1=value-1", }, f.Namespace.Name) }) }) -func newConfigMap(f *Framework, name string) *api.ConfigMap { +func newConfigMap(f *framework.Framework, name string) *api.ConfigMap { return &api.ConfigMap{ ObjectMeta: api.ObjectMeta{ Namespace: f.Namespace.Name, @@ -215,7 +216,7 @@ func newConfigMap(f *Framework, name string) *api.ConfigMap { } } -func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) { +func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64) { var ( name = "configmap-test-volume-" + 
string(util.NewUUID()) volumeName = "configmap-volume" @@ -227,12 +228,12 @@ func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) { defer func() { By("Cleaning up the configMap") if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { - Failf("unable to delete configMap %v: %v", configMap.Name, err) + framework.Failf("unable to delete configMap %v: %v", configMap.Name, err) } }() var err error if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - Failf("unable to create test configMap %s: %v", configMap.Name, err) + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &api.Pod{ @@ -279,13 +280,13 @@ func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) { pod.Spec.SecurityContext.FSGroup = &fsGroup } - testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ + framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{ "content of file \"/etc/configmap-volume/data-1\": value-1", }, f.Namespace.Name) } -func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) { +func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64) { var ( name = "configmap-test-volume-map-" + string(util.NewUUID()) volumeName = "configmap-volume" @@ -297,12 +298,12 @@ func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) { defer func() { By("Cleaning up the configMap") if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { - Failf("unable to delete configMap %v: %v", configMap.Name, err) + framework.Failf("unable to delete configMap %v: %v", configMap.Name, err) } }() var err error if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - Failf("unable to create test configMap %s: %v", configMap.Name, err) + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &api.Pod{ @@ -355,7 +356,7 @@ func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) { pod.Spec.SecurityContext.FSGroup = &fsGroup } - testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ + framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{ "content of file \"/etc/configmap-volume/path/to/data-2\": value-2", }, f.Namespace.Name) } diff --git a/test/e2e/container_probe.go b/test/e2e/container_probe.go index 823910e9837..718db5a70b4 100644 --- a/test/e2e/container_probe.go +++ b/test/e2e/container_probe.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -35,49 +36,49 @@ const ( probTestInitialDelaySeconds = 30 ) -var _ = KubeDescribe("Probing container", func() { - framework := NewDefaultFramework("container-probe") +var _ = framework.KubeDescribe("Probing container", func() { + f := framework.NewDefaultFramework("container-probe") var podClient client.PodInterface probe := webserverProbeBuilder{} BeforeEach(func() { - podClient = framework.Client.Pods(framework.Namespace.Name) + podClient = f.Client.Pods(f.Namespace.Name) }) It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() { p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil)) - expectNoError(err) + framework.ExpectNoError(err) - Expect(wait.Poll(poll, 240*time.Second, func() (bool, error) { + Expect(wait.Poll(framework.Poll, 240*time.Second, func() (bool, error) { p, err := podClient.Get(p.Name) if err != nil { return false, err } ready := api.IsPodReady(p) if !ready { - Logf("pod is not yet ready; pod has phase %q.", p.Status.Phase) + framework.Logf("pod is not yet ready; pod has phase %q.", p.Status.Phase) return false, nil } return true, nil })).NotTo(HaveOccurred(), "pod never became ready") p, err = podClient.Get(p.Name) - expectNoError(err) - isReady, err := podRunningReady(p) - expectNoError(err) + framework.ExpectNoError(err) + isReady, err := framework.PodRunningReady(p) + framework.ExpectNoError(err) Expect(isReady).To(BeTrue(), "pod should be ready") // We assume the pod became ready when the container became ready. This // is true for a single container pod. readyTime, err := getTransitionTimeForReadyCondition(p) - expectNoError(err) + framework.ExpectNoError(err) startedTime, err := getContainerStartedTime(p, probTestContainerName) - expectNoError(err) + framework.ExpectNoError(err) - Logf("Container started at %v, pod became ready at %v", startedTime, readyTime) + framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime) initialDelay := probTestInitialDelaySeconds * time.Second if readyTime.Sub(startedTime) < initialDelay { - Failf("Pod became ready before it's %v initial delay", initialDelay) + framework.Failf("Pod became ready before it's %v initial delay", initialDelay) } restartCount := getRestartCount(p) @@ -86,9 +87,9 @@ var _ = KubeDescribe("Probing container", func() { It("with readiness probe that fails should never be ready and never restart [Conformance]", func() { p, err := podClient.Create(makePodSpec(probe.withFailing().build(), nil)) - expectNoError(err) + framework.ExpectNoError(err) - err = wait.Poll(poll, 180*time.Second, func() (bool, error) { + err = wait.Poll(framework.Poll, 180*time.Second, func() (bool, error) { p, err := podClient.Get(p.Name) if err != nil { return false, err @@ -96,13 +97,13 @@ var _ = KubeDescribe("Probing container", func() { return api.IsPodReady(p), nil }) if err != wait.ErrWaitTimeout { - Failf("expecting wait timeout error but got: %v", err) + framework.Failf("expecting wait timeout error but got: %v", err) } p, err = podClient.Get(p.Name) - expectNoError(err) + framework.ExpectNoError(err) - isReady, err := podRunningReady(p) + isReady, err := framework.PodRunningReady(p) Expect(isReady).NotTo(BeTrue(), "pod should be not ready") restartCount := getRestartCount(p) diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go index d81becd0de9..e7c137e6046 100644 --- a/test/e2e/daemon_restart.go +++ b/test/e2e/daemon_restart.go @@ -32,6 +32,7 @@ import ( 
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -55,10 +56,10 @@ const ( ) // nodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name, -// eg: the name returned by getMasterHost(). This is also not guaranteed to work across +// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across // cloud providers since it involves ssh. -func nodeExec(nodeName, cmd string) (SSHResult, error) { - result, err := SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), testContext.Provider) +func nodeExec(nodeName, cmd string) (framework.SSHResult, error) { + result, err := framework.SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) return result, err } @@ -75,8 +76,8 @@ type restartDaemonConfig struct { // NewRestartConfig creates a restartDaemonConfig for the given node and daemon. func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig { - if !providerIs("gce") { - Logf("WARNING: SSH through the restart config might not work on %s", testContext.Provider) + if !framework.ProviderIs("gce") { + framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider) } return &restartDaemonConfig{ nodeName: nodeName, @@ -93,31 +94,31 @@ func (r *restartDaemonConfig) String() string { // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout func (r *restartDaemonConfig) waitUp() { - Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) + framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) healthzCheck := fmt.Sprintf( "curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort) err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) { result, err := nodeExec(r.nodeName, healthzCheck) - expectNoError(err) + framework.ExpectNoError(err) if result.Code == 0 { httpCode, err := strconv.Atoi(result.Stdout) if err != nil { - Logf("Unable to parse healthz http return code: %v", err) + framework.Logf("Unable to parse healthz http return code: %v", err) } else if httpCode == 200 { return true, nil } } - Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v", + framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v", r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr) return false, nil }) - expectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout) + framework.ExpectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout) } // kill sends a SIGTERM to the daemon func (r *restartDaemonConfig) kill() { - Logf("Killing %v", r) + framework.Logf("Killing %v", r) nodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)) } @@ -163,7 +164,7 @@ func replacePods(pods []*api.Pod, store cache.Store) { for i := range pods { found = append(found, pods[i]) } - expectNoError(store.Replace(found, "0")) + framework.ExpectNoError(store.Replace(found, "0")) } // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, @@ -171,26 +172,26 @@ func replacePods(pods 
[]*api.Pod, store cache.Store) { func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) { options := api.ListOptions{LabelSelector: labelSelector} pods, err := c.Pods(ns).List(options) - expectNoError(err) + framework.ExpectNoError(err) failedContainers := 0 containerRestartNodes := sets.NewString() for _, p := range pods.Items { - for _, v := range FailedContainers(&p) { - failedContainers = failedContainers + v.restarts + for _, v := range framework.FailedContainers(&p) { + failedContainers = failedContainers + v.Restarts containerRestartNodes.Insert(p.Spec.NodeName) } } return failedContainers, containerRestartNodes.List() } -var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { +var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { - framework := NewDefaultFramework("daemonrestart") + f := framework.NewDefaultFramework("daemonrestart") rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID()) labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector() existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc) var ns string - var config RCConfig + var config framework.RCConfig var controller *controllerframework.Controller var newPods cache.Store var stopCh chan struct{} @@ -199,20 +200,20 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { BeforeEach(func() { // These tests require SSH // TODO(11834): Enable this test in GKE once experimental API there is switched on - SkipUnlessProviderIs("gce", "aws") - ns = framework.Namespace.Name + framework.SkipUnlessProviderIs("gce", "aws") + ns = f.Namespace.Name // All the restart tests need an rc and a watch on pods of the rc. // Additionally some of them might scale the rc during the test. - config = RCConfig{ - Client: framework.Client, + config = framework.RCConfig{ + Client: f.Client, Name: rcName, Namespace: ns, Image: "gcr.io/google_containers/pause:2.0", Replicas: numPods, CreatedPods: &[]*api.Pod{}, } - Expect(RunRC(config)).NotTo(HaveOccurred()) + Expect(framework.RunRC(config)).NotTo(HaveOccurred()) replacePods(*config.CreatedPods, existingPods) stopCh = make(chan struct{}) @@ -221,11 +222,11 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector - return framework.Client.Pods(ns).List(options) + return f.Client.Pods(ns).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return framework.Client.Pods(ns).Watch(options) + return f.Client.Pods(ns).Watch(options) }, }, &api.Pod{}, @@ -252,7 +253,7 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { It("Controller Manager should not create/delete replicas across restart", func() { restarter := NewRestartConfig( - getMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout) + framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout) restarter.restart() // The intent is to ensure the replication controller manager has observed and reported status of @@ -260,7 +261,7 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { // that it had the opportunity to create/delete pods, if it were going to do so. 
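(Note on the daemon_restart.go hunks above: the restart-counting helper now goes through the exported framework.FailedContainers, whose values expose a capitalised Restarts field instead of the old lowercase one. A minimal sketch of that post-migration shape, not part of the patch; it assumes pods is an *api.PodList the test has already fetched and that the file imports "k8s.io/kubernetes/pkg/util/sets" and the framework package as shown above.)

    // Illustrative sketch only, not part of the patch.
    failed := 0
    nodes := sets.NewString()
    for _, p := range pods.Items {
        for _, v := range framework.FailedContainers(&p) {
            failed += v.Restarts // field is exported after the move to the framework package
            nodes.Insert(p.Spec.NodeName)
        }
    }
    framework.Logf("%d failed containers across nodes %v", failed, nodes.List())
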
Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - ScaleRC(framework.Client, ns, rcName, numPods, true) + framework.ScaleRC(f.Client, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -274,14 +275,14 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { } if len(newKeys.List()) != len(existingKeys.List()) || !newKeys.IsSuperset(existingKeys) { - Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) + framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) } }) It("Scheduler should continue assigning pods to nodes across restart", func() { restarter := NewRestartConfig( - getMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout) + framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout) // Create pods while the scheduler is down and make sure the scheduler picks them up by // scaling the rc to the same size. @@ -289,28 +290,28 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { restarter.kill() // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. - expectNoError(ScaleRC(framework.Client, ns, rcName, numPods+5, false)) + framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, false)) restarter.waitUp() - expectNoError(ScaleRC(framework.Client, ns, rcName, numPods+5, true)) + framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, true)) }) It("Kubelet should not restart containers across restart", func() { - nodeIPs, err := getNodePublicIps(framework.Client) - expectNoError(err) - preRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector) + nodeIPs, err := getNodePublicIps(f.Client) + framework.ExpectNoError(err) + preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector) if preRestarts != 0 { - Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) + framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) } for _, ip := range nodeIPs { restarter := NewRestartConfig( ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout) restarter.restart() } - postRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector) + postRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector) if postRestarts != preRestarts { - dumpNodeDebugInfo(framework.Client, badNodes) - Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) + framework.DumpNodeDebugInfo(f.Client, badNodes) + framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) }) diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index 72c9056c2d2..26480c3afa8 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -53,25 +54,25 @@ const ( // happen. 
In the future, running in parallel may work if we have an eviction // model which lets the DS controller kick out other pods to make room. // See http://issues.k8s.io/21767 for more details -var _ = KubeDescribe("Daemon set [Serial]", func() { - var f *Framework +var _ = framework.KubeDescribe("Daemon set [Serial]", func() { + var f *framework.Framework AfterEach(func() { if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil { - Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets)) + framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets)) } else { - Logf("unable to dump daemonsets: %v", err) + framework.Logf("unable to dump daemonsets: %v", err) } if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil { - Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods)) + framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods)) } else { - Logf("unable to dump pods: %v", err) + framework.Logf("unable to dump pods: %v", err) } err := clearDaemonSetNodeLabels(f.Client) Expect(err).NotTo(HaveOccurred()) }) - f = NewDefaultFramework("daemonsets") + f = framework.NewDefaultFramework("daemonsets") image := "gcr.io/google_containers/serve_hostname:v1.4" dsName := "daemon-set" @@ -89,7 +90,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() { It("should run and stop simple daemon", func() { label := map[string]string{daemonsetNameLabel: dsName} - Logf("Creating simple daemon set %s", dsName) + framework.Logf("Creating simple daemon set %s", dsName) _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: dsName, @@ -113,7 +114,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() { }) Expect(err).NotTo(HaveOccurred()) defer func() { - Logf("Check that reaper kills all daemon pods for %s", dsName) + framework.Logf("Check that reaper kills all daemon pods for %s", dsName) dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c) Expect(err).NotTo(HaveOccurred()) err = dsReaper.Stop(ns, dsName, 0, nil) @@ -146,7 +147,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() { It("should run and stop complex daemon", func() { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} - Logf("Creating daemon with a node selector %s", dsName) + framework.Logf("Creating daemon with a node selector %s", dsName) _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: dsName, @@ -177,7 +178,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes") By("Change label of node, check that daemon pod is launched.") - nodeList := ListSchedulableNodesOrDie(f.Client) + nodeList := framework.ListSchedulableNodesOrDie(f.Client) Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) Expect(err).NotTo(HaveOccurred(), "error setting labels on node") @@ -212,7 +213,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m } func clearDaemonSetNodeLabels(c *client.Client) error { - nodeList := ListSchedulableNodesOrDie(c) + nodeList := framework.ListSchedulableNodesOrDie(c) for _, node := range 
nodeList.Items { _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) if err != nil { @@ -248,7 +249,7 @@ func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string return true, err } if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict { - Logf("failed to update node due to resource version conflict") + framework.Logf("failed to update node due to resource version conflict") return false, nil } return false, err @@ -262,7 +263,7 @@ func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string return newNode, nil } -func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames []string) func() (bool, error) { +func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) { return func() (bool, error) { selector := labels.Set(selector).AsSelector() options := api.ListOptions{LabelSelector: selector} @@ -276,7 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [ for _, pod := range pods { nodesToPodCount[pod.Spec.NodeName] += 1 } - Logf("nodesToPodCount: %#v", nodesToPodCount) + framework.Logf("nodesToPodCount: %#v", nodesToPodCount) // Ensure that exactly 1 pod is running on all nodes in nodeNames. for _, nodeName := range nodeNames { @@ -292,10 +293,10 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [ } } -func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) { +func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) { return func() (bool, error) { nodeList, err := f.Client.Nodes().List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { nodeNames = append(nodeNames, node.Name) @@ -304,6 +305,6 @@ func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bo } } -func checkRunningOnNoNodes(f *Framework, selector map[string]string) func() (bool, error) { +func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) { return checkDaemonPodOnNodes(f, selector, make([]string, 0)) } diff --git a/test/e2e/dashboard.go b/test/e2e/dashboard.go index a369402d4c2..306e6902908 100644 --- a/test/e2e/dashboard.go +++ b/test/e2e/dashboard.go @@ -23,12 +23,13 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("Kubernetes Dashboard", func() { +var _ = framework.KubeDescribe("Kubernetes Dashboard", func() { const ( uiServiceName = "kubernetes-dashboard" uiAppName = uiServiceName @@ -37,36 +38,36 @@ var _ = KubeDescribe("Kubernetes Dashboard", func() { serverStartTimeout = 1 * time.Minute ) - f := NewDefaultFramework(uiServiceName) + f := framework.NewDefaultFramework(uiServiceName) It("should check that the kubernetes-dashboard instance is alive", func() { By("Checking whether the kubernetes-dashboard service exists.") - err := waitForService(f.Client, uiNamespace, uiServiceName, true, poll, serviceStartTimeout) + err := framework.WaitForService(f.Client, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout) Expect(err).NotTo(HaveOccurred()) By("Checking to make sure the kubernetes-dashboard pods are running") selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName})) - err = waitForPodsWithLabelRunning(f.Client, uiNamespace, selector) + err = framework.WaitForPodsWithLabelRunning(f.Client, uiNamespace, selector) Expect(err).NotTo(HaveOccurred()) By("Checking to make sure we get a response from the kubernetes-dashboard.") - err = wait.Poll(poll, serverStartTimeout, func() (bool, error) { + err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) { var status int - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { - Logf("Get services proxy request failed: %v", errProxy) + framework.Logf("Get services proxy request failed: %v", errProxy) } // Query against the proxy URL for the kube-ui service. err := proxyRequest.Namespace(uiNamespace). Name(uiServiceName). - Timeout(singleCallTimeout). + Timeout(framework.SingleCallTimeout). Do(). StatusCode(&status). Error() if status != http.StatusOK { - Logf("Unexpected status from kubernetes-dashboard: %v", status) + framework.Logf("Unexpected status from kubernetes-dashboard: %v", status) } else if err != nil { - Logf("Request to kube-ui failed: %v", err) + framework.Logf("Request to kube-ui failed: %v", err) } // Don't return err here as it aborts polling. return status == http.StatusOK, nil @@ -77,7 +78,7 @@ var _ = KubeDescribe("Kubernetes Dashboard", func() { var status int err = f.Client.Get(). AbsPath("/ui"). - Timeout(singleCallTimeout). + Timeout(framework.SingleCallTimeout). Do(). StatusCode(&status). Error() diff --git a/test/e2e/density.go b/test/e2e/density.go index c7b0183ae10..00a37836618 100644 --- a/test/e2e/density.go +++ b/test/e2e/density.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -49,36 +50,36 @@ const ( // Maximum container failures this test tolerates before failing. 
var MaxContainerFailures = 0 -func density30AddonResourceVerifier() map[string]resourceConstraint { - constraints := make(map[string]resourceConstraint) - constraints["fluentd-elasticsearch"] = resourceConstraint{ - cpuConstraint: 0.1, - memoryConstraint: 250 * (1024 * 1024), +func density30AddonResourceVerifier() map[string]framework.ResourceConstraint { + constraints := make(map[string]framework.ResourceConstraint) + constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{ + CPUConstraint: 0.1, + MemoryConstraint: 250 * (1024 * 1024), } - constraints["elasticsearch-logging"] = resourceConstraint{ - cpuConstraint: 2, + constraints["elasticsearch-logging"] = framework.ResourceConstraint{ + CPUConstraint: 2, // TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164 - memoryConstraint: 5000 * (1024 * 1024), + MemoryConstraint: 5000 * (1024 * 1024), } - constraints["heapster"] = resourceConstraint{ - cpuConstraint: 2, - memoryConstraint: 1800 * (1024 * 1024), + constraints["heapster"] = framework.ResourceConstraint{ + CPUConstraint: 2, + MemoryConstraint: 1800 * (1024 * 1024), } - constraints["kibana-logging"] = resourceConstraint{ - cpuConstraint: 0.2, - memoryConstraint: 100 * (1024 * 1024), + constraints["kibana-logging"] = framework.ResourceConstraint{ + CPUConstraint: 0.2, + MemoryConstraint: 100 * (1024 * 1024), } - constraints["kube-proxy"] = resourceConstraint{ - cpuConstraint: 0.05, - memoryConstraint: 20 * (1024 * 1024), + constraints["kube-proxy"] = framework.ResourceConstraint{ + CPUConstraint: 0.05, + MemoryConstraint: 20 * (1024 * 1024), } - constraints["l7-lb-controller"] = resourceConstraint{ - cpuConstraint: 0.05, - memoryConstraint: 20 * (1024 * 1024), + constraints["l7-lb-controller"] = framework.ResourceConstraint{ + CPUConstraint: 0.05, + MemoryConstraint: 20 * (1024 * 1024), } - constraints["influxdb"] = resourceConstraint{ - cpuConstraint: 2, - memoryConstraint: 500 * (1024 * 1024), + constraints["influxdb"] = framework.ResourceConstraint{ + CPUConstraint: 2, + MemoryConstraint: 500 * (1024 * 1024), } return constraints } @@ -90,7 +91,7 @@ func density30AddonResourceVerifier() map[string]resourceConstraint { // IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones // results will not be representative for control-plane performance as we'll start hitting // limits on Docker's concurrent container startup. -var _ = KubeDescribe("Density", func() { +var _ = framework.KubeDescribe("Density", func() { var c *client.Client var nodeCount int var RCName string @@ -109,35 +110,35 @@ var _ = KubeDescribe("Density", func() { saturationThreshold = MinSaturationThreshold } Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold)) - saturationData := SaturationTime{ + saturationData := framework.SaturationTime{ TimeToSaturate: e2eStartupTime, NumberOfNodes: nodeCount, NumberOfPods: totalPods, Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second), } - Logf("Cluster saturation time: %s", prettyPrintJSON(saturationData)) + framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData)) // Verify latency metrics. - highLatencyRequests, err := HighLatencyRequests(c) - expectNoError(err) + highLatencyRequests, err := framework.HighLatencyRequests(c) + framework.ExpectNoError(err) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests") // Verify scheduler metrics. 
// TODO: Reset metrics at the beginning of the test. // We should do something similar to how we do it for APIserver. - expectNoError(VerifySchedulerLatency(c)) + framework.ExpectNoError(framework.VerifySchedulerLatency(c)) }) // Explicitly put here, to delete namespace at the end of the test // (after measuring latency metrics, etc.). - framework := NewDefaultFramework("density") - framework.NamespaceDeletionTimeout = time.Hour + f := framework.NewDefaultFramework("density") + f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name - nodes := ListSchedulableNodesOrDie(c) + nodes := framework.ListSchedulableNodesOrDie(c) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) @@ -147,15 +148,15 @@ var _ = KubeDescribe("Density", func() { // Terminating a namespace (deleting the remaining objects from it - which // generally means events) can affect the current run. Thus we wait for all // terminating namespace to be finally deleted before starting this test. - err := checkTestingNSDeletedExcept(c, ns) - expectNoError(err) + err := framework.CheckTestingNSDeletedExcept(c, ns) + framework.ExpectNoError(err) uuid = string(util.NewUUID()) - expectNoError(resetMetrics(c)) - expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777)) + framework.ExpectNoError(framework.ResetMetrics(c)) + framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777)) - Logf("Listing nodes for easy debugging:\n") + framework.Logf("Listing nodes for easy debugging:\n") for _, node := range nodes.Items { var internalIP, externalIP string for _, address := range node.Status.Addresses { @@ -166,7 +167,7 @@ var _ = KubeDescribe("Density", func() { externalIP = address.Address } } - Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP) + framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP) } }) @@ -192,7 +193,7 @@ var _ = KubeDescribe("Density", func() { switch testArg.podsPerNode { case 30: name = "[Feature:Performance] " + name - framework.addonResourceConstraints = density30AddonResourceVerifier() + f.AddonResourceConstraints = density30AddonResourceVerifier() case 95: name = "[Feature:HighDensityPerformance]" + name default: @@ -203,10 +204,10 @@ var _ = KubeDescribe("Density", func() { podsPerNode := itArg.podsPerNode totalPods = podsPerNode * nodeCount RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid - fileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+"/%s/pod_states.csv", uuid)) - expectNoError(err) + fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid)) + framework.ExpectNoError(err) defer fileHndl.Close() - config := RCConfig{Client: c, + config := framework.RCConfig{Client: c, Image: "gcr.io/google_containers/pause:2.0", Name: RCName, Namespace: ns, @@ -274,10 +275,10 @@ var _ = KubeDescribe("Density", func() { // Start the replication controller. 
startTime := time.Now() - expectNoError(RunRC(config)) + framework.ExpectNoError(framework.RunRC(config)) e2eStartupTime = time.Now().Sub(startTime) - Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime) - Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(totalPods)/float32(e2eStartupTime/time.Second)) + framework.Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime) + framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(totalPods)/float32(e2eStartupTime/time.Second)) By("Waiting for all events to be recorded") last := -1 @@ -302,21 +303,21 @@ var _ = KubeDescribe("Density", func() { close(stop) if current != last { - Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes()) + framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes()) } - Logf("Found %d events", current) + framework.Logf("Found %d events", current) if currentCount != lastCount { - Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes()) + framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes()) } - Logf("Found %d updates", currentCount) + framework.Logf("Found %d updates", currentCount) // Tune the threshold for allowed failures. - badEvents := BadEvents(events) + badEvents := framework.BadEvents(events) Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods))))) // Print some data about Pod to Node allocation By("Printing Pod to Node allocation data") podList, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) pausePodAllocation := make(map[string]int) systemPodAllocation := make(map[string][]string) for _, pod := range podList.Items { @@ -332,7 +333,7 @@ var _ = KubeDescribe("Density", func() { } sort.Strings(nodeNames) for _, node := range nodeNames { - Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node]) + framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node]) } if itArg.runLatencyTest { @@ -366,7 +367,7 @@ var _ = KubeDescribe("Density", func() { if startTime != unversioned.NewTime(time.Time{}) { runTimes[p.Name] = startTime } else { - Failf("Pod %v is reported to be running, but none of its containers is", p.Name) + framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name) } } } @@ -428,7 +429,7 @@ var _ = KubeDescribe("Density", func() { By("Waiting for all Pods begin observed by the watch...") for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) { if time.Since(start) < timeout { - Failf("Timeout reached waiting for all Pods being observed by the watch.") + framework.Failf("Timeout reached waiting for all Pods being observed by the watch.") } } close(stopCh) @@ -440,7 +441,7 @@ var _ = KubeDescribe("Density", func() { } for node, count := range nodeToLatencyPods { if count > 1 { - Logf("%d latency pods scheduled on %s", count, node) + framework.Logf("%d latency pods scheduled on %s", count, node) } } @@ -451,7 +452,7 @@ var _ = KubeDescribe("Density", func() { }.AsSelector() options := api.ListOptions{FieldSelector: selector} schedEvents, err := c.Events(ns).List(options) - expectNoError(err) + framework.ExpectNoError(err) for k := range createTimes { for _, event := range schedEvents.Items { if event.InvolvedObject.Name == k { @@ 
-461,11 +462,11 @@ var _ = KubeDescribe("Density", func() { } } - scheduleLag := make([]podLatencyData, 0) - startupLag := make([]podLatencyData, 0) - watchLag := make([]podLatencyData, 0) - schedToWatchLag := make([]podLatencyData, 0) - e2eLag := make([]podLatencyData, 0) + scheduleLag := make([]framework.PodLatencyData, 0) + startupLag := make([]framework.PodLatencyData, 0) + watchLag := make([]framework.PodLatencyData, 0) + schedToWatchLag := make([]framework.PodLatencyData, 0) + e2eLag := make([]framework.PodLatencyData, 0) for name, create := range createTimes { sched, ok := scheduleTimes[name] @@ -477,30 +478,30 @@ var _ = KubeDescribe("Density", func() { node, ok := nodes[name] Expect(ok).To(Equal(true)) - scheduleLag = append(scheduleLag, podLatencyData{name, node, sched.Time.Sub(create.Time)}) - startupLag = append(startupLag, podLatencyData{name, node, run.Time.Sub(sched.Time)}) - watchLag = append(watchLag, podLatencyData{name, node, watch.Time.Sub(run.Time)}) - schedToWatchLag = append(schedToWatchLag, podLatencyData{name, node, watch.Time.Sub(sched.Time)}) - e2eLag = append(e2eLag, podLatencyData{name, node, watch.Time.Sub(create.Time)}) + scheduleLag = append(scheduleLag, framework.PodLatencyData{name, node, sched.Time.Sub(create.Time)}) + startupLag = append(startupLag, framework.PodLatencyData{name, node, run.Time.Sub(sched.Time)}) + watchLag = append(watchLag, framework.PodLatencyData{name, node, watch.Time.Sub(run.Time)}) + schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{name, node, watch.Time.Sub(sched.Time)}) + e2eLag = append(e2eLag, framework.PodLatencyData{name, node, watch.Time.Sub(create.Time)}) } - sort.Sort(latencySlice(scheduleLag)) - sort.Sort(latencySlice(startupLag)) - sort.Sort(latencySlice(watchLag)) - sort.Sort(latencySlice(schedToWatchLag)) - sort.Sort(latencySlice(e2eLag)) + sort.Sort(framework.LatencySlice(scheduleLag)) + sort.Sort(framework.LatencySlice(startupLag)) + sort.Sort(framework.LatencySlice(watchLag)) + sort.Sort(framework.LatencySlice(schedToWatchLag)) + sort.Sort(framework.LatencySlice(e2eLag)) - printLatencies(scheduleLag, "worst schedule latencies") - printLatencies(startupLag, "worst run-after-schedule latencies") - printLatencies(watchLag, "worst watch latencies") - printLatencies(schedToWatchLag, "worst scheduled-to-end total latencies") - printLatencies(e2eLag, "worst e2e total latencies") + framework.PrintLatencies(scheduleLag, "worst schedule latencies") + framework.PrintLatencies(startupLag, "worst run-after-schedule latencies") + framework.PrintLatencies(watchLag, "worst watch latencies") + framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies") + framework.PrintLatencies(e2eLag, "worst e2e total latencies") // Test whether e2e pod startup time is acceptable. 
- podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLag)} - expectNoError(VerifyPodStartupLatency(podStartupLatency)) + podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)} + framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency)) - logSuspiciousLatency(startupLag, e2eLag, nodeCount, c) + framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c) } By("Deleting ReplicationController") @@ -508,8 +509,8 @@ var _ = KubeDescribe("Density", func() { rc, err := c.ReplicationControllers(ns).Get(RCName) if err == nil && rc.Spec.Replicas != 0 { By("Cleaning up the replication controller") - err := DeleteRC(c, ns, RCName) - expectNoError(err) + err := framework.DeleteRC(c, ns, RCName) + framework.ExpectNoError(err) } By("Removing additional replication controllers if any") @@ -559,7 +560,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, imag }, } _, err := c.ReplicationControllers(ns).Create(rc) - expectNoError(err) - expectNoError(waitForRCPodsRunning(c, ns, name)) - Logf("Found pod '%s' running", name) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name)) + framework.Logf("Found pod '%s' running", name) } diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index 0c5ac85fe48..d9a8fb87493 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -46,8 +47,8 @@ const ( redisImageName = "redis" ) -var _ = KubeDescribe("Deployment", func() { - f := NewDefaultFramework("deployment") +var _ = framework.KubeDescribe("Deployment", func() { + f := framework.NewDefaultFramework("deployment") It("deployment should create new pods", func() { testNewDeployment(f) @@ -174,25 +175,25 @@ func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymen deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) - Logf("deleting deployment %s", deploymentName) + framework.Logf("deleting deployment %s", deploymentName) reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC) Expect(err).NotTo(HaveOccurred()) timeout := 1 * time.Minute err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) - Logf("ensuring deployment %s was deleted", deploymentName) + framework.Logf("ensuring deployment %s was deleted", deploymentName) _, err = c.Extensions().Deployments(ns).Get(deployment.Name) Expect(err).To(HaveOccurred()) Expect(errors.IsNotFound(err)).To(BeTrue()) - Logf("ensuring deployment %s RSes were deleted", deploymentName) + framework.Logf("ensuring deployment %s RSes were deleted", deploymentName) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) options := api.ListOptions{LabelSelector: selector} rss, err := c.Extensions().ReplicaSets(ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(rss.Items).Should(HaveLen(0)) - Logf("ensuring deployment %s pods were deleted", deploymentName) + framework.Logf("ensuring deployment %s pods were deleted", deploymentName) var pods *api.PodList if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { pods, err = c.Core().Pods(ns).List(api.ListOptions{}) 
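(Both the daemon_restart.go and density.go hunks above switch from the package-local RCConfig/RunRC/DeleteRC helpers to their exported framework equivalents. A compact sketch of the resulting replication-controller lifecycle, not part of the patch; it assumes c (*client.Client) and ns come from the test's framework.Framework as in those hunks, and the name and replica count are placeholders.)

    // Illustrative sketch only, not part of the patch.
    config := framework.RCConfig{
        Client:    c,
        Name:      "example-rc", // placeholder name
        Namespace: ns,
        Image:     "gcr.io/google_containers/pause:2.0",
        Replicas:  3,
    }
    // Create the RC and wait for its pods through the shared framework helper ...
    framework.ExpectNoError(framework.RunRC(config))
    // ... and clean it up the same way the density test does above.
    defer func() {
        framework.ExpectNoError(framework.DeleteRC(c, ns, config.Name))
    }()
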
@@ -204,11 +205,11 @@ func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymen } return false, nil }); err != nil { - Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods) + framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods) } } -func testNewDeployment(f *Framework) { +func testNewDeployment(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -217,7 +218,7 @@ func testNewDeployment(f *Framework) { deploymentName := "test-new-deployment" podLabels := map[string]string{"name": nginxImageName} replicas := 1 - Logf("Creating simple deployment %s", deploymentName) + framework.Logf("Creating simple deployment %s", deploymentName) d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} _, err := c.Extensions().Deployments(ns).Create(d) @@ -225,10 +226,10 @@ func testNewDeployment(f *Framework) { defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) Expect(err).NotTo(HaveOccurred()) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) @@ -242,7 +243,7 @@ func testNewDeployment(f *Framework) { Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set")) } -func testRollingUpdateDeployment(f *Framework) { +func testRollingUpdateDeployment(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -260,24 +261,24 @@ func testRollingUpdateDeployment(f *Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = verifyPods(unversionedClient, ns, "sample-pod", false, 3) + err = framework.VerifyPods(unversionedClient, ns, "sample-pod", false, 3) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Create a deployment to delete nginx pods and instead bring up redis pods. 
deploymentName := "test-rolling-update-deployment" - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil)) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) Expect(err).NotTo(HaveOccurred()) // There should be 1 old RS (nginx-controller, which is adopted) @@ -292,7 +293,7 @@ func testRollingUpdateDeployment(f *Framework) { Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0)) } -func testRollingUpdateDeploymentEvents(f *Framework) { +func testRollingUpdateDeploymentEvents(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -316,32 +317,32 @@ func testRollingUpdateDeploymentEvents(f *Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(rs) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1) + err = framework.VerifyPods(unversionedClient, ns, "sample-pod-2", false, 1) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-rolling-scale-deployment" - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil)) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 3546343826724305833 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Verify that the pods were scaled up and down as expected. We use events to verify that. 
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) - waitForEvents(unversionedClient, ns, deployment, 2) + framework.WaitForEvents(unversionedClient, ns, deployment, 2) events, err := c.Core().Events(ns).Search(deployment) if err != nil { - Logf("error in listing events: %s", err) + framework.Logf("error in listing events: %s", err) Expect(err).NotTo(HaveOccurred()) } // There should be 2 events, one to scale up the new ReplicaSet and then to scale down @@ -354,7 +355,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) { Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName))) } -func testRecreateDeployment(f *Framework) { +func testRecreateDeployment(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -372,33 +373,33 @@ func testRecreateDeployment(f *Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3) + err = framework.VerifyPods(unversionedClient, ns, "sample-pod-3", false, 3) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-recreate-deployment" - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil)) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0) Expect(err).NotTo(HaveOccurred()) // Verify that the pods were scaled up and down as expected. We use events to verify that. deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) - waitForEvents(unversionedClient, ns, deployment, 2) + framework.WaitForEvents(unversionedClient, ns, deployment, 2) events, err := c.Core().Events(ns).Search(deployment) if err != nil { - Logf("error in listing events: %s", err) + framework.Logf("error in listing events: %s", err) Expect(err).NotTo(HaveOccurred()) } // There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet. 
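(The deployment.go hunks above all follow one create-then-wait sequence, now expressed through the exported framework wait helpers. A hedged sketch of that sequence, not part of the patch; c, ns, podLabels and the redis image constants are assumed to be in scope exactly as in those hunks, and the deployment name and replica count are placeholders.)

    // Illustrative sketch only, not part of the patch.
    replicas := 3
    deploymentName := "example-deployment"
    _, err := c.Extensions().Deployments(ns).Create(
        newDeployment(deploymentName, replicas, podLabels, redisImageName, redisImage,
            extensions.RollingUpdateDeploymentStrategyType, nil))
    Expect(err).NotTo(HaveOccurred())

    // Wait until the controller records revision "1" with the expected image,
    // then until the rollout settles inside the allowed surge/unavailable window.
    err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
    Expect(err).NotTo(HaveOccurred())
    err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
    Expect(err).NotTo(HaveOccurred())
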
@@ -411,7 +412,7 @@ func testRecreateDeployment(f *Framework) { } // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy -func testDeploymentCleanUpPolicy(f *Framework) { +func testDeploymentCleanUpPolicy(f *framework.Framework) { ns := f.Namespace.Name unversionedClient := f.Client c := adapter.FromUnversionedClient(unversionedClient) @@ -428,15 +429,15 @@ func testDeploymentCleanUpPolicy(f *Framework) { Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1) + err = framework.VerifyPods(unversionedClient, ns, "cleanup-pod", false, 1) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-cleanup-deployment" - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { @@ -459,14 +460,14 @@ func testDeploymentCleanUpPolicy(f *Framework) { } numPodCreation-- if numPodCreation < 0 { - Failf("Expect only one pod creation, the second creation event: %#v\n", event) + framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event) } pod, ok := event.Object.(*api.Pod) if !ok { Fail("Expect event Object to be a pod") } if pod.Spec.Containers[0].Name != redisImageName { - Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod) + framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod) } case <-stopCh: return @@ -477,14 +478,14 @@ func testDeploymentCleanUpPolicy(f *Framework) { Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) - err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit) + err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit) Expect(err).NotTo(HaveOccurred()) close(stopCh) } // testRolloverDeployment tests that deployment supports rollover. // i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes. -func testRolloverDeployment(f *Framework) { +func testRolloverDeployment(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -502,14 +503,14 @@ func testRolloverDeployment(f *Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. 
- err = verifyPods(unversionedClient, ns, podName, false, rsReplicas) + err = framework.VerifyPods(unversionedClient, ns, podName, false, rsReplicas) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Wait for the required pods to be ready for at least minReadySeconds (be available) deploymentMinReadySeconds := 5 - err = waitForPodsReady(c, ns, podName, deploymentMinReadySeconds) + err = framework.WaitForPodsReady(c, ns, podName, deploymentMinReadySeconds) Expect(err).NotTo(HaveOccurred()) // Create a deployment to delete nginx pods and instead bring up redis-slave pods. @@ -517,7 +518,7 @@ func testRolloverDeployment(f *Framework) { deploymentReplicas := 4 deploymentImage := "gcr.io/google_samples/gb-redisslave:v1" deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) newDeployment.Spec.MinReadySeconds = deploymentMinReadySeconds newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ @@ -532,7 +533,7 @@ func testRolloverDeployment(f *Framework) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) // Make sure the deployment starts to scale up and down replica sets - waitForPartialEvents(unversionedClient, ns, deployment, 2) + framework.WaitForPartialEvents(unversionedClient, ns, deployment, 2) // Check if it's updated to revision 1 correctly _, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage) @@ -540,25 +541,25 @@ func testRolloverDeployment(f *Framework) { // If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it. Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas)) updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage - deployment, err = updateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) Expect(err).NotTo(HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. 
- err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 2 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds) Expect(err).NotTo(HaveOccurred()) } -func testPausedDeployment(f *Framework) { +func testPausedDeployment(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -568,7 +569,7 @@ func testPausedDeployment(f *Framework) { podLabels := map[string]string{"name": nginxImageName} d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d.Spec.Paused = true - Logf("Creating paused deployment %s", deploymentName) + framework.Logf("Creating paused deployment %s", deploymentName) _, err := c.Extensions().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) @@ -585,13 +586,13 @@ func testPausedDeployment(f *Framework) { } // Update the deployment to run - deployment, err = updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { update.Spec.Paused = false }) Expect(err).NotTo(HaveOccurred()) // Use observedGeneration to determine if the controller noticed the resume. - err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) Expect(err).NotTo(HaveOccurred()) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -612,18 +613,18 @@ func testPausedDeployment(f *Framework) { // Pause the deployment and delete the replica set. // The paused deployment shouldn't recreate a new one. - deployment, err = updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { update.Spec.Paused = true }) Expect(err).NotTo(HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pause. 
- err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) Expect(err).NotTo(HaveOccurred()) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) Expect(err).NotTo(HaveOccurred()) - Expect(DeleteReplicaSet(unversionedClient, ns, newRS.Name)).NotTo(HaveOccurred()) + Expect(framework.DeleteReplicaSet(unversionedClient, ns, newRS.Name)).NotTo(HaveOccurred()) deployment, err = c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) @@ -643,7 +644,7 @@ func testPausedDeployment(f *Framework) { // testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and // then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3), // and then rollback to last revision. -func testRollbackDeployment(f *Framework) { +func testRollbackDeployment(f *framework.Framework) { ns := f.Namespace.Name unversionedClient := f.Client c := adapter.FromUnversionedClient(unversionedClient) @@ -655,7 +656,7 @@ func testRollbackDeployment(f *Framework) { deploymentReplicas := 1 deploymentImage := nginxImage deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) createAnnotation := map[string]string{"action": "create", "author": "minion"} d.Annotations = createAnnotation @@ -664,21 +665,21 @@ func testRollbackDeployment(f *Framework) { defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Current newRS annotation should be "create" - err = checkNewRSAnnotations(c, ns, deploymentName, createAnnotation) + err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) Expect(err).NotTo(HaveOccurred()) // 2. Update the deployment to create redis pods. updatedDeploymentImage := redisImage updatedDeploymentImageName := redisImageName updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"} - deployment, err := updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Annotations = updateAnnotation @@ -686,62 +687,62 @@ func testRollbackDeployment(f *Framework) { Expect(err).NotTo(HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. 
- err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 2 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Current newRS annotation should be "update" - err = checkNewRSAnnotations(c, ns, deploymentName, updateAnnotation) + err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) Expect(err).NotTo(HaveOccurred()) // 3. Update the deploymentRollback to rollback to revision 1 revision := int64(1) - Logf("rolling back deployment %s to revision %d", deploymentName, revision) + framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback := newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) // Wait for the deployment to start rolling back - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // TODO: report RollbackDone in deployment status and check it here // Wait for it to be updated to revision 3 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Current newRS annotation should be "create", after the rollback - err = checkNewRSAnnotations(c, ns, deploymentName, createAnnotation) + err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation) Expect(err).NotTo(HaveOccurred()) // 4. 
Update the deploymentRollback to rollback to last revision revision = 0 - Logf("rolling back deployment %s to last revision", deploymentName) + framework.Logf("rolling back deployment %s to last revision", deploymentName) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 4 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Current newRS annotation should be "update", after the rollback - err = checkNewRSAnnotations(c, ns, deploymentName, updateAnnotation) + err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation) Expect(err).NotTo(HaveOccurred()) } @@ -752,7 +753,7 @@ func testRollbackDeployment(f *Framework) { // becomes v3. Then rollback the deployment to v10 (doesn't exist in history) should fail. // Finally, rollback the deployment (v3) to v3 should be no-op. // TODO: When we finished reporting rollback status in deployment status, check the rollback status here in each case. -func testRollbackDeploymentRSNoRevision(f *Framework) { +func testRollbackDeploymentRSNoRevision(f *framework.Framework) { ns := f.Namespace.Name c := adapter.FromUnversionedClient(f.Client) podName := "nginx" @@ -776,17 +777,17 @@ func testRollbackDeploymentRSNoRevision(f *Framework) { deploymentReplicas := 1 deploymentImage := nginxImage deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) _, err = c.Extensions().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // Check that the replica set we created still doesn't contain revision information @@ -797,13 +798,13 @@ func testRollbackDeploymentRSNoRevision(f *Framework) { // 2. 
Update the deploymentRollback to rollback to last revision // Since there's only 1 revision in history, it should stay as revision 1 revision := int64(0) - Logf("rolling back deployment %s to last revision", deploymentName) + framework.Logf("rolling back deployment %s to last revision", deploymentName) rollback := newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) // Wait for the deployment to start rolling back - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // TODO: report RollbackRevisionNotFound in deployment status and check it here @@ -814,53 +815,53 @@ func testRollbackDeploymentRSNoRevision(f *Framework) { // 3. Update the deployment to create redis pods. updatedDeploymentImage := redisImage updatedDeploymentImageName := redisImageName - deployment, err := updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) Expect(err).NotTo(HaveOccurred()) // Use observedGeneration to determine if the controller noticed the pod template update. - err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) + err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 2 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // 4. Update the deploymentRollback to rollback to revision 1 revision = 1 - Logf("rolling back deployment %s to revision %d", deploymentName, revision) + framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) // Wait for the deployment to start rolling back - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // TODO: report RollbackDone in deployment status and check it here // The pod template should be updated to the one in revision 1 // Wait for it to be updated to revision 3 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) Expect(err).NotTo(HaveOccurred()) - err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) Expect(err).NotTo(HaveOccurred()) // 5. 
Update the deploymentRollback to rollback to revision 10 // Since there's no revision 10 in history, it should stay as revision 3 revision = 10 - Logf("rolling back deployment %s to revision %d", deploymentName, revision) + framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) // Wait for the deployment to start rolling back - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // TODO: report RollbackRevisionNotFound in deployment status and check it here @@ -871,13 +872,13 @@ func testRollbackDeploymentRSNoRevision(f *Framework) { // 6. Update the deploymentRollback to rollback to revision 3 // Since it's already revision 3, it should be no-op revision = 3 - Logf("rolling back deployment %s to revision %d", deploymentName, revision) + framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision) rollback = newDeploymentRollback(deploymentName, nil, revision) err = c.Extensions().Deployments(ns).Rollback(rollback) Expect(err).NotTo(HaveOccurred()) // Wait for the deployment to start rolling back - err = waitForDeploymentRollbackCleared(c, ns, deploymentName) + err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName) Expect(err).NotTo(HaveOccurred()) // TODO: report RollbackTemplateUnchanged in deployment status and check it here @@ -886,7 +887,7 @@ func testRollbackDeploymentRSNoRevision(f *Framework) { checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage) } -func testDeploymentLabelAdopted(f *Framework) { +func testDeploymentLabelAdopted(f *framework.Framework) { ns := f.Namespace.Name // TODO: remove unversionedClient when the refactoring is done. Currently some // functions like verifyPod still expects a unversioned#Client. @@ -902,25 +903,25 @@ func testDeploymentLabelAdopted(f *Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = verifyPods(unversionedClient, ns, podName, false, 3) + err = framework.VerifyPods(unversionedClient, ns, podName, false, 3) if err != nil { - Logf("error in waiting for pods to come up: %s", err) + framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) } // Create a nginx deployment to adopt the old rs. 
deploymentName := "test-adopted-deployment" - Logf("Creating deployment %s", deploymentName) + framework.Logf("Creating deployment %s", deploymentName) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, podName, image, extensions.RollingUpdateDeploymentStrategyType, nil)) Expect(err).NotTo(HaveOccurred()) defer stopDeployment(c, f.Client, ns, deploymentName) // Wait for it to be updated to revision 1 - err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image) + err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image) Expect(err).NotTo(HaveOccurred()) // The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment - err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) + err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) Expect(err).NotTo(HaveOccurred()) // There should be no old RSs (overlapping RS) @@ -933,7 +934,7 @@ func testDeploymentLabelAdopted(f *Framework) { // New RS should contain pod-template-hash in its selector, label, and template label newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) Expect(err).NotTo(HaveOccurred()) - err = checkRSHashLabel(newRS) + err = framework.CheckRSHashLabel(newRS) Expect(err).NotTo(HaveOccurred()) // All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -941,7 +942,7 @@ func testDeploymentLabelAdopted(f *Framework) { options := api.ListOptions{LabelSelector: selector} pods, err := c.Core().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) - err = checkPodHashLabel(pods) + err = framework.CheckPodHashLabel(pods) Expect(err).NotTo(HaveOccurred()) Expect(len(pods.Items)).Should(Equal(replicas)) } diff --git a/test/e2e/dns.go b/test/e2e/dns.go index 355e309b77d..e22f42b4104 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" ) const dnsTestPodHostName = "dns-querier-1" @@ -150,9 +151,9 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, fileNameP func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) { var failed []string - expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { failed = []string{} - subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, client) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client) if err != nil { return false, err } @@ -175,20 +176,20 @@ func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client * Do().Raw() } if err != nil { - Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err) + framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err) failed = append(failed, fileName) } } if len(failed) == 0 { return true, nil } - Logf("Lookups using %s failed for: %v\n", pod.Name, failed) + framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed) return false, nil })) Expect(len(failed)).To(Equal(0)) } -func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) { +func 
validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) { By("submitting the pod to kubernetes") podClient := f.Client.Pods(f.Namespace.Name) @@ -198,15 +199,15 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) { podClient.Delete(pod.Name, api.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - Failf("Failed to create %s pod: %v", pod.Name, err) + framework.Failf("Failed to create %s pod: %v", pod.Name, err) } - expectNoError(f.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("retrieving the pod") pod, err := podClient.Get(pod.Name) if err != nil { - Failf("Failed to get pod %s: %v", pod.Name, err) + framework.Failf("Failed to get pod %s: %v", pod.Name, err) } // Try to find results for each expected name. By("looking for the results for each expected name from probiers") @@ -214,21 +215,21 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) { // TODO: probe from the host, too. - Logf("DNS probes using %s succeeded\n", pod.Name) + framework.Logf("DNS probes using %s succeeded\n", pod.Name) } -func verifyDNSPodIsRunning(f *Framework) { +func verifyDNSPodIsRunning(f *framework.Framework) { systemClient := f.Client.Pods(api.NamespaceSystem) By("Waiting for DNS Service to be Running") options := api.ListOptions{LabelSelector: dnsServiceLabelSelector} dnsPods, err := systemClient.List(options) if err != nil { - Failf("Failed to list all dns service pods") + framework.Failf("Failed to list all dns service pods") } if len(dnsPods.Items) != 1 { - Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLabelSelector.String()) + framework.Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLabelSelector.String()) } - expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem)) } func createServiceSpec(serviceName string, isHeadless bool, selector map[string]string) *api.Service { @@ -249,8 +250,8 @@ func createServiceSpec(serviceName string, isHeadless bool, selector map[string] return headlessService } -var _ = KubeDescribe("DNS", func() { - f := NewDefaultFramework("dns") +var _ = framework.KubeDescribe("DNS", func() { + f := framework.NewDefaultFramework("dns") It("should provide DNS for the cluster [Conformance]", func() { verifyDNSPodIsRunning(f) @@ -264,7 +265,7 @@ var _ = KubeDescribe("DNS", func() { "google.com", } // Added due to #8512. This is critical for GCE and GKE deployments. - if providerIs("gce", "gke") { + if framework.ProviderIs("gce", "gke") { namesToResolve = append(namesToResolve, "metadata") } diff --git a/test/e2e/docker_containers.go b/test/e2e/docker_containers.go index 11865d7aa42..b38cb93f2d4 100644 --- a/test/e2e/docker_containers.go +++ b/test/e2e/docker_containers.go @@ -20,22 +20,23 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" ) -var _ = KubeDescribe("Docker Containers", func() { - framework := NewDefaultFramework("containers") +var _ = framework.KubeDescribe("Docker Containers", func() { + f := framework.NewDefaultFramework("containers") var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name }) It("should use the image defaults if command and args are blank [Conformance]", func() { - testContainerOutput("use defaults", c, entrypointTestPod(), 0, []string{ + framework.TestContainerOutput("use defaults", c, entrypointTestPod(), 0, []string{ "[/ep default arguments]", }, ns) }) @@ -44,7 +45,7 @@ var _ = KubeDescribe("Docker Containers", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Args = []string{"override", "arguments"} - testContainerOutput("override arguments", c, pod, 0, []string{ + framework.TestContainerOutput("override arguments", c, pod, 0, []string{ "[/ep override arguments]", }, ns) }) @@ -55,7 +56,7 @@ var _ = KubeDescribe("Docker Containers", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Command = []string{"/ep-2"} - testContainerOutput("override command", c, pod, 0, []string{ + framework.TestContainerOutput("override command", c, pod, 0, []string{ "[/ep-2]", }, ns) }) @@ -65,7 +66,7 @@ var _ = KubeDescribe("Docker Containers", func() { pod.Spec.Containers[0].Command = []string{"/ep-2"} pod.Spec.Containers[0].Args = []string{"override", "arguments"} - testContainerOutput("override all", c, pod, 0, []string{ + framework.TestContainerOutput("override all", c, pod, 0, []string{ "[/ep-2 override arguments]", }, ns) }) diff --git a/test/e2e/downward_api.go b/test/e2e/downward_api.go index b75b13487ec..1014d8ee371 100644 --- a/test/e2e/downward_api.go +++ b/test/e2e/downward_api.go @@ -21,12 +21,13 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" ) -var _ = KubeDescribe("Downward API", func() { - framework := NewDefaultFramework("downward-api") +var _ = framework.KubeDescribe("Downward API", func() { + f := framework.NewDefaultFramework("downward-api") It("should provide pod name and namespace as env vars [Conformance]", func() { podName := "downward-api-" + string(util.NewUUID()) @@ -53,10 +54,10 @@ var _ = KubeDescribe("Downward API", func() { expectations := []string{ fmt.Sprintf("POD_NAME=%v", podName), - fmt.Sprintf("POD_NAMESPACE=%v", framework.Namespace.Name), + fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name), } - testDownwardAPI(framework, podName, env, expectations) + testDownwardAPI(f, podName, env, expectations) }) It("should provide pod IP as an env var", func() { @@ -77,11 +78,11 @@ var _ = KubeDescribe("Downward API", func() { "POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)", } - testDownwardAPI(framework, podName, env, expectations) + testDownwardAPI(f, podName, env, expectations) }) }) -func testDownwardAPI(framework *Framework, podName string, env []api.EnvVar, expectations []string) { +func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, @@ -100,5 +101,5 @@ func testDownwardAPI(framework *Framework, podName string, env []api.EnvVar, exp }, } - framework.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) + f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) } diff --git a/test/e2e/downwardapi_volume.go b/test/e2e/downwardapi_volume.go index 916a0053f0f..5b7232bf722 100644 --- a/test/e2e/downwardapi_volume.go +++ b/test/e2e/downwardapi_volume.go @@ -22,21 +22,22 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("Downward API volume", func() { +var _ = framework.KubeDescribe("Downward API volume", func() { // How long to wait for a log pod to be displayed const podLogTimeout = 45 * time.Second - f := NewDefaultFramework("downward-api") + f := framework.NewDefaultFramework("downward-api") It("should provide podname only [Conformance]", func() { podName := "downwardapi-volume-" + string(util.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname") - testContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ + framework.TestContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ fmt.Sprintf("%s\n", podName), }, f.Namespace.Name) }) @@ -50,7 +51,7 @@ var _ = KubeDescribe("Downward API volume", func() { RunAsUser: &uid, FSGroup: &gid, } - testContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ + framework.TestContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ fmt.Sprintf("%s\n", podName), }, f.Namespace.Name) }) @@ -71,15 +72,15 @@ var _ = KubeDescribe("Downward API volume", func() { _, err := f.Client.Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) - expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { - return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) }, - podLogTimeout, poll).Should(ContainSubstring("key1=\"value1\"\n")) + podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n")) //modify labels pod.Labels["key3"] = "value3" @@ -88,9 +89,9 @@ var _ = KubeDescribe("Downward API volume", func() { Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { - return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) }, - podLogTimeout, poll).Should(ContainSubstring("key3=\"value3\"\n")) + podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n")) }) @@ -108,15 +109,15 @@ var _ = KubeDescribe("Downward API volume", func() { By("Creating the pod") _, err := f.Client.Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) - expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { - return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) }, - podLogTimeout, poll).Should(ContainSubstring("builder=\"bar\"\n")) + podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n")) //modify annotations pod.Annotations["builder"] = "foo" @@ -125,9 +126,9 @@ var _ = KubeDescribe("Downward API volume", func() { Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { - return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) }, - podLogTimeout, 
poll).Should(ContainSubstring("builder=\"foo\"\n")) + podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n")) }) }) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 7e34a3c0e46..43341a7693e 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -36,6 +36,7 @@ import ( gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -99,19 +100,19 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Delete any namespaces except default and kube-system. This ensures no // lingering resources are left over from a previous test run. - if testContext.CleanStart { - c, err := loadClient() + if framework.TestContext.CleanStart { + c, err := framework.LoadClient() if err != nil { glog.Fatal("Error loading client: ", err) } - deleted, err := deleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault}) + deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault}) if err != nil { - Failf("Error deleting orphaned namespaces: %v", err) + framework.Failf("Error deleting orphaned namespaces: %v", err) } glog.Infof("Waiting for deletion of the following namespaces: %v", deleted) - if err := waitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { - Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) + if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil { + framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) } } @@ -119,15 +120,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // cluster infrastructure pods that are being pulled or started can block // test pods from running, and tests that ensure all pods are running and // ready will fail). 
- if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil { - if c, errClient := loadClient(); errClient != nil { - Logf("Unable to dump cluster information because: %v", errClient) + if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, framework.TestContext.MinStartupPods, podStartupTimeout); err != nil { + if c, errClient := framework.LoadClient(); errClient != nil { + framework.Logf("Unable to dump cluster information because: %v", errClient) } else { - dumpAllNamespaceInfo(c, api.NamespaceSystem) + framework.DumpAllNamespaceInfo(c, api.NamespaceSystem) } - logFailedContainers(api.NamespaceSystem) - runKubernetesServiceTestContainer(testContext.RepoRoot, api.NamespaceDefault) - Failf("Error waiting for all pods to be running and ready: %v", err) + framework.LogFailedContainers(api.NamespaceSystem) + framework.RunKubernetesServiceTestContainer(framework.TestContext.RepoRoot, api.NamespaceDefault) + framework.Failf("Error waiting for all pods to be running and ready: %v", err) } return nil @@ -188,7 +189,7 @@ var _ = ginkgo.SynchronizedAfterSuite(func() { }, func() { // Run only Ginkgo on node 1 if framework.TestContext.ReportDir != "" { - CoreDump(framework.TestContext.ReportDir) + framework.CoreDump(framework.TestContext.ReportDir) } }) @@ -225,6 +226,6 @@ func RunE2ETests(t *testing.T) { r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)))) } } - glog.Infof("Starting e2e run %q on Ginkgo node %d", runId, config.GinkgoConfig.ParallelNode) + glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 38249884de0..076f2354bd6 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -18,10 +18,12 @@ package e2e import ( "testing" + + "k8s.io/kubernetes/test/e2e/framework" ) func init() { - RegisterFlags() + framework.RegisterFlags() } func TestE2E(t *testing.T) { diff --git a/test/e2e/empty_dir.go b/test/e2e/empty_dir.go index ec12f24f5b6..48b254198c7 100644 --- a/test/e2e/empty_dir.go +++ b/test/e2e/empty_dir.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" ) @@ -33,9 +34,9 @@ const ( testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3" ) -var _ = KubeDescribe("EmptyDir volumes", func() { +var _ = framework.KubeDescribe("EmptyDir volumes", func() { - f := NewDefaultFramework("emptydir") + f := framework.NewDefaultFramework("emptydir") Context("when FSGroup is specified [Feature:FSGroup]", func() { It("new files should be created with FSGroup ownership when container is root", func() { @@ -117,7 +118,7 @@ const ( volumeName = "test-volume" ) -func doTestSetgidFSGroup(f *Framework, image string, medium api.StorageMedium) { +func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" filePath = path.Join(volumePath, "test-file") @@ -147,7 +148,7 @@ func doTestSetgidFSGroup(f *Framework, image string, medium api.StorageMedium) { f.TestContainerOutput(msg, pod, 0, out) } -func doTestVolumeModeFSGroup(f *Framework, image string, medium api.StorageMedium) { +func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" source = &api.EmptyDirVolumeSource{Medium: medium} @@ -172,7 +173,7 @@ func doTestVolumeModeFSGroup(f *Framework, image string, medium api.StorageMediu f.TestContainerOutput(msg, pod, 0, out) } -func doTest0644FSGroup(f *Framework, image string, medium api.StorageMedium) { +func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" filePath = path.Join(volumePath, "test-file") @@ -200,7 +201,7 @@ func doTest0644FSGroup(f *Framework, image string, medium api.StorageMedium) { f.TestContainerOutput(msg, pod, 0, out) } -func doTestVolumeMode(f *Framework, image string, medium api.StorageMedium) { +func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" source = &api.EmptyDirVolumeSource{Medium: medium} @@ -222,7 +223,7 @@ func doTestVolumeMode(f *Framework, image string, medium api.StorageMedium) { f.TestContainerOutput(msg, pod, 0, out) } -func doTest0644(f *Framework, image string, medium api.StorageMedium) { +func doTest0644(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" filePath = path.Join(volumePath, "test-file") @@ -247,7 +248,7 @@ func doTest0644(f *Framework, image string, medium api.StorageMedium) { f.TestContainerOutput(msg, pod, 0, out) } -func doTest0666(f *Framework, image string, medium api.StorageMedium) { +func doTest0666(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" filePath = path.Join(volumePath, "test-file") @@ -272,7 +273,7 @@ func doTest0666(f *Framework, image string, medium api.StorageMedium) { f.TestContainerOutput(msg, pod, 0, out) } -func doTest0777(f *Framework, image string, medium api.StorageMedium) { +func doTest0777(f *framework.Framework, image string, medium api.StorageMedium) { var ( volumePath = "/test-volume" filePath = path.Join(volumePath, "test-file") diff --git a/test/e2e/empty_dir_wrapper.go b/test/e2e/empty_dir_wrapper.go index dd4c5532fcc..c96979f18a4 100644 --- a/test/e2e/empty_dir_wrapper.go +++ b/test/e2e/empty_dir_wrapper.go @@ -20,6 +20,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/test/e2e/framework" "strconv" @@ -28,8 +29,8 @@ import ( // This test will create a pod with a secret volume and gitRepo volume 
// Thus requests a secret, a git server pod, and a git server service -var _ = KubeDescribe("EmptyDir wrapper volumes", func() { - f := NewDefaultFramework("emptydir-wrapper") +var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() { + f := framework.NewDefaultFramework("emptydir-wrapper") It("should becomes running", func() { name := "emptydir-wrapper-test-" + string(util.NewUUID()) @@ -48,7 +49,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() { var err error if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { - Failf("unable to create test secret %s: %v", secret.Name, err) + framework.Failf("unable to create test secret %s: %v", secret.Name, err) } gitServerPodName := "git-server-" + string(util.NewUUID()) @@ -76,7 +77,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() { } if gitServerPod, err = f.Client.Pods(f.Namespace.Name).Create(gitServerPod); err != nil { - Failf("unable to create test git server pod %s: %v", gitServerPod.Name, err) + framework.Failf("unable to create test git server pod %s: %v", gitServerPod.Name, err) } // Portal IP and port @@ -99,7 +100,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() { } if gitServerSvc, err = f.Client.Services(f.Namespace.Name).Create(gitServerSvc); err != nil { - Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) + framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } gitVolumeName := "git-volume" @@ -152,28 +153,28 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() { } if pod, err = f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { - Failf("unable to create pod %v: %v", pod.Name, err) + framework.Failf("unable to create pod %v: %v", pod.Name, err) } defer func() { By("Cleaning up the secret") if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { - Failf("unable to delete secret %v: %v", secret.Name, err) + framework.Failf("unable to delete secret %v: %v", secret.Name, err) } By("Cleaning up the git server pod") if err = f.Client.Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil { - Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) + framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } By("Cleaning up the git server svc") if err = f.Client.Services(f.Namespace.Name).Delete(gitServerSvc.Name); err != nil { - Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) + framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } By("Cleaning up the git vol pod") if err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { - Failf("unable to delete git vol pod %v: %v", pod.Name, err) + framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err) } }() - expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) }) }) diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go index d09339e38b3..632a3241992 100644 --- a/test/e2e/es_cluster_logging.go +++ b/test/e2e/es_cluster_logging.go @@ -25,19 +25,20 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() { - f := NewDefaultFramework("es-logging") +var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() { + f := framework.NewDefaultFramework("es-logging") BeforeEach(func() { // TODO: For now assume we are only testing cluster logging with Elasticsearch // on GCE. Once we are sure that Elasticsearch cluster level logging // works for other providers we should widen this scope of this test. - SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") }) It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() { @@ -54,7 +55,7 @@ const ( func bodyToJSON(body []byte) (map[string]interface{}, error) { var r map[string]interface{} if err := json.Unmarshal(body, &r); err != nil { - Logf("Bad JSON: %s", string(body)) + framework.Logf("Bad JSON: %s", string(body)) return nil, fmt.Errorf("failed to unmarshal Elasticsearch response: %v", err) } return r, nil @@ -70,7 +71,7 @@ func nodeInNodeList(nodeName string, nodeList *api.NodeList) bool { } // ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging. -func ClusterLevelLoggingWithElasticsearch(f *Framework) { +func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) { // graceTime is how long to keep retrying requests for status information. const graceTime = 5 * time.Minute // ingestionTimeout is how long to keep retrying to wait for all the @@ -87,7 +88,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { if _, err = s.Get("elasticsearch-logging"); err == nil { break } - Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) + framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) } Expect(err).NotTo(HaveOccurred()) @@ -98,7 +99,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { pods, err := f.Client.Pods(api.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) + err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) Expect(err).NotTo(HaveOccurred()) } @@ -109,9 +110,9 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { err = nil var body []byte for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) { - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { - Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } // Query against the root URL for Elasticsearch. @@ -119,42 +120,42 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { Name("elasticsearch-logging"). 
DoRaw() if err != nil { - Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err) + framework.Logf("After %v proxy call to elasticsearch-logging failed: %v", time.Since(start), err) continue } esResponse, err = bodyToJSON(body) if err != nil { - Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err) + framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err) continue } statusIntf, ok := esResponse["status"] if !ok { - Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse) + framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse) continue } statusCode, ok = statusIntf.(float64) if !ok { // Assume this is a string returning Failure. Retry. - Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf) + framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf) continue } if int(statusCode) != 200 { - Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode) + framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode) continue } break } Expect(err).NotTo(HaveOccurred()) if int(statusCode) != 200 { - Failf("Elasticsearch cluster has a bad status: %v", statusCode) + framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode) } // Check to see if have a cluster_name field. clusterName, ok := esResponse["cluster_name"] if !ok { - Failf("No cluster_name field in Elasticsearch response: %v", esResponse) + framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse) } if clusterName != "kubernetes-logging" { - Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName) + framework.Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName) } // Now assume we really are talking to an Elasticsearch instance. @@ -162,9 +163,9 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { By("Checking health of Elasticsearch service.") healthy := false for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { - Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } body, err = proxyRequest.Namespace(api.NamespaceSystem). 
@@ -177,17 +178,17 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { } health, err := bodyToJSON(body) if err != nil { - Logf("Bad json response from elasticsearch: %v", err) + framework.Logf("Bad json response from elasticsearch: %v", err) continue } statusIntf, ok := health["status"] if !ok { - Logf("No status field found in cluster health response: %v", health) + framework.Logf("No status field found in cluster health response: %v", health) continue } status := statusIntf.(string) if status != "green" && status != "yellow" { - Logf("Cluster health has bad status: %v", health) + framework.Logf("Cluster health has bad status: %v", health) continue } if err == nil && ok { @@ -196,27 +197,27 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { } } if !healthy { - Failf("After %v elasticsearch cluster is not healthy", graceTime) + framework.Failf("After %v elasticsearch cluster is not healthy", graceTime) } // Obtain a list of nodes so we can place one synthetic logger on each node. - nodes := ListSchedulableNodesOrDie(f.Client) + nodes := framework.ListSchedulableNodesOrDie(f.Client) nodeCount := len(nodes.Items) if nodeCount == 0 { - Failf("Failed to find any nodes") + framework.Failf("Failed to find any nodes") } - Logf("Found %d nodes.", len(nodes.Items)) + framework.Logf("Found %d nodes.", len(nodes.Items)) // Filter out unhealthy nodes. // Previous tests may have cause failures of some nodes. Let's skip // 'Not Ready' nodes, just in case (there is no need to fail the test). - filterNodes(nodes, func(node api.Node) bool { - return isNodeConditionSetAsExpected(&node, api.NodeReady, true) + framework.FilterNodes(nodes, func(node api.Node) bool { + return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) if len(nodes.Items) < 2 { - Failf("Less than two nodes were found Ready: %d", len(nodes.Items)) + framework.Failf("Less than two nodes were found Ready: %d", len(nodes.Items)) } - Logf("Found %d healthy nodes.", len(nodes.Items)) + framework.Logf("Found %d healthy nodes.", len(nodes.Items)) // Wait for the Fluentd pods to enter the running state. By("Checking to make sure the Fluentd pod are running on each healthy node") @@ -226,7 +227,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { Expect(err).NotTo(HaveOccurred()) for _, pod := range fluentdPods.Items { if nodeInNodeList(pod.Spec.NodeName, nodes) { - err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) + err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) Expect(err).NotTo(HaveOccurred()) } } @@ -241,7 +242,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { } } if !exists { - Failf("Node %v does not have fluentd pod running on it.", node.Name) + framework.Failf("Node %v does not have fluentd pod running on it.", node.Name) } } @@ -253,7 +254,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { // Form a unique name to taint log lines to be collected. // Replace '-' characters with '_' to prevent the analyzer from breaking apart names. taintName := strings.Replace(ns+name, "-", "_", -1) - Logf("Tainting log lines with %v", taintName) + framework.Logf("Tainting log lines with %v", taintName) // podNames records the names of the synthetic logging pods that are created in the // loop below. 
var podNames []string @@ -288,7 +289,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { defer func() { for _, pod := range podNames { if err = f.Client.Pods(ns).Delete(pod, nil); err != nil { - Logf("Failed to delete pod %s: %v", pod, err) + framework.Logf("Failed to delete pod %s: %v", pod, err) } } }() @@ -296,7 +297,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { // Wait for the synthetic logging pods to finish. By("Waiting for the pods to succeed.") for _, pod := range podNames { - err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns) + err = framework.WaitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns) Expect(err).NotTo(HaveOccurred()) } @@ -315,18 +316,18 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { options := api.ListOptions{LabelSelector: selector} esPods, err := f.Client.Pods(api.NamespaceSystem).List(options) if err != nil { - Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err) + framework.Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err) continue } else { for i, pod := range esPods.Items { - Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase, + framework.Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase, pod.Status.Conditions) } } - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { - Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } // Ask Elasticsearch to return all the log lines that were tagged with the underscore @@ -339,33 +340,33 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { Param("size", strconv.Itoa(2*expected)). 
DoRaw() if err != nil { - Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err) + framework.Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err) continue } response, err := bodyToJSON(body) if err != nil { - Logf("After %v failed to unmarshal response: %v", time.Since(start), err) - Logf("Body: %s", string(body)) + framework.Logf("After %v failed to unmarshal response: %v", time.Since(start), err) + framework.Logf("Body: %s", string(body)) continue } hits, ok := response["hits"].(map[string]interface{}) if !ok { - Logf("response[hits] not of the expected type: %T", response["hits"]) + framework.Logf("response[hits] not of the expected type: %T", response["hits"]) continue } totalF, ok := hits["total"].(float64) if !ok { - Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"]) + framework.Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"]) continue } total := int(totalF) if total != expected { - Logf("After %v expecting to find %d log lines but saw %d", time.Since(start), expected, total) + framework.Logf("After %v expecting to find %d log lines but saw %d", time.Since(start), expected, total) } h, ok := hits["hits"].([]interface{}) if !ok { - Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"]) + framework.Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"]) continue } // Initialize data-structure for observing counts. @@ -377,44 +378,44 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { for _, e := range h { l, ok := e.(map[string]interface{}) if !ok { - Logf("element of hit not of expected type: %T", e) + framework.Logf("element of hit not of expected type: %T", e) continue } source, ok := l["_source"].(map[string]interface{}) if !ok { - Logf("_source not of the expected type: %T", l["_source"]) + framework.Logf("_source not of the expected type: %T", l["_source"]) continue } msg, ok := source["log"].(string) if !ok { - Logf("log not of the expected type: %T", source["log"]) + framework.Logf("log not of the expected type: %T", source["log"]) continue } words := strings.Split(msg, " ") if len(words) != 4 { - Logf("Malformed log line: %s", msg) + framework.Logf("Malformed log line: %s", msg) continue } n, err := strconv.ParseUint(words[0], 10, 0) if err != nil { - Logf("Expecting numer of node as first field of %s", msg) + framework.Logf("Expecting numer of node as first field of %s", msg) continue } if n < 0 || int(n) >= nodeCount { - Logf("Node count index out of range: %d", nodeCount) + framework.Logf("Node count index out of range: %d", nodeCount) continue } index, err := strconv.ParseUint(words[2], 10, 0) if err != nil { - Logf("Expecting number as third field of %s", msg) + framework.Logf("Expecting number as third field of %s", msg) continue } if index < 0 || index >= countTo { - Logf("Index value out of range: %d", index) + framework.Logf("Index value out of range: %d", index) continue } if words[1] != taintName { - Logf("Elasticsearch query return unexpected log line: %s", msg) + framework.Logf("Elasticsearch query return unexpected log line: %s", msg) continue } // Record the observation of a log line from node n at the given index. 
@@ -431,45 +432,45 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) { missingPerNode[n]++ } if c < 0 || c > 1 { - Logf("Got incorrect count for node %d index %d: %d", n, i, c) + framework.Logf("Got incorrect count for node %d index %d: %d", n, i, c) incorrectCount = true } } } if incorrectCount { - Logf("After %v es still return duplicated log lines", time.Since(start)) + framework.Logf("After %v es still return duplicated log lines", time.Since(start)) continue } if totalMissing != 0 { - Logf("After %v still missing %d log lines", time.Since(start), totalMissing) + framework.Logf("After %v still missing %d log lines", time.Since(start), totalMissing) continue } - Logf("After %s found all %d log lines", time.Since(start), expected) + framework.Logf("After %s found all %d log lines", time.Since(start), expected) return } for n := range missingPerNode { if missingPerNode[n] > 0 { - Logf("Node %d %s is missing %d logs", n, nodes.Items[n].Name, missingPerNode[n]) + framework.Logf("Node %d %s is missing %d logs", n, nodes.Items[n].Name, missingPerNode[n]) opts := &api.PodLogOptions{} body, err = f.Client.Pods(ns).GetLogs(podNames[n], opts).DoRaw() if err != nil { - Logf("Cannot get logs from pod %v", podNames[n]) + framework.Logf("Cannot get logs from pod %v", podNames[n]) continue } - Logf("Pod %s has the following logs: %s", podNames[n], body) + framework.Logf("Pod %s has the following logs: %s", podNames[n], body) for _, pod := range fluentdPods.Items { if pod.Spec.NodeName == nodes.Items[n].Name { body, err = f.Client.Pods(api.NamespaceSystem).GetLogs(pod.Name, opts).DoRaw() if err != nil { - Logf("Cannot get logs from pod %v", pod.Name) + framework.Logf("Cannot get logs from pod %v", pod.Name) break } - Logf("Fluentd Pod %s on node %s has the following logs: %s", pod.Name, nodes.Items[n].Name, body) + framework.Logf("Fluentd Pod %s on node %s has the following logs: %s", pod.Name, nodes.Items[n].Name, body) break } } } } - Failf("Failed to find all %d log lines", expected) + framework.Failf("Failed to find all %d log lines", expected) } diff --git a/test/e2e/etcd_failure.go b/test/e2e/etcd_failure.go index 1768ac202a4..d585ee00c3c 100644 --- a/test/e2e/etcd_failure.go +++ b/test/e2e/etcd_failure.go @@ -22,14 +22,15 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("Etcd failure [Disruptive]", func() { +var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() { - framework := NewDefaultFramework("etcd-failure") + f := framework.NewDefaultFramework("etcd-failure") BeforeEach(func() { // This test requires: @@ -37,12 +38,12 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() { // - master access // ... so the provider check should be identical to the intersection of // providers that provide those capabilities. 
- SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") - Expect(RunRC(RCConfig{ - Client: framework.Client, + Expect(framework.RunRC(framework.RCConfig{ + Client: f.Client, Name: "baz", - Namespace: framework.Namespace.Name, + Namespace: f.Namespace.Name, Image: "gcr.io/google_containers/pause:2.0", Replicas: 1, })).NotTo(HaveOccurred()) @@ -50,7 +51,7 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() { It("should recover from network partition with master", func() { etcdFailTest( - framework, + f, "sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP", "sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP", ) @@ -58,19 +59,19 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() { It("should recover from SIGKILL", func() { etcdFailTest( - framework, + f, "pgrep etcd | xargs -I {} sudo kill -9 {}", "echo 'do nothing. monit should restart etcd.'", ) }) }) -func etcdFailTest(framework *Framework, failCommand, fixCommand string) { +func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) { doEtcdFailure(failCommand, fixCommand) - checkExistingRCRecovers(framework) + checkExistingRCRecovers(f) - ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") + ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4") } // For this duration, etcd will be failed by executing a failCommand on the master. @@ -89,25 +90,25 @@ func doEtcdFailure(failCommand, fixCommand string) { } func masterExec(cmd string) { - result, err := SSH(cmd, getMasterHost()+":22", testContext.Provider) + result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider) Expect(err).NotTo(HaveOccurred()) if result.Code != 0 { - LogSSHResult(result) - Failf("master exec command returned non-zero") + framework.LogSSHResult(result) + framework.Failf("master exec command returned non-zero") } } -func checkExistingRCRecovers(f *Framework) { +func checkExistingRCRecovers(f *framework.Framework) { By("assert that the pre-existing replication controller recovers") podClient := f.Client.Pods(f.Namespace.Name) rcSelector := labels.Set{"name": "baz"}.AsSelector() By("deleting pods from existing replication controller") - expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { options := api.ListOptions{LabelSelector: rcSelector} pods, err := podClient.List(options) if err != nil { - Logf("apiserver returned error, as expected before recovery: %v", err) + framework.Logf("apiserver returned error, as expected before recovery: %v", err) return false, nil } if len(pods.Items) == 0 { @@ -117,12 +118,12 @@ func checkExistingRCRecovers(f *Framework) { err = podClient.Delete(pod.Name, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) } - Logf("apiserver has recovered") + framework.Logf("apiserver has recovered") return true, nil })) By("waiting for replication controller to recover") - expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { options := api.ListOptions{LabelSelector: rcSelector} pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/events.go b/test/e2e/events.go index d361da0e199..ee655500fa1 100644 --- a/test/e2e/events.go +++ b/test/e2e/events.go @@ -26,17 +26,18 @@ import ( 
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("Events", func() { - framework := NewDefaultFramework("events") +var _ = framework.KubeDescribe("Events", func() { + f := framework.NewDefaultFramework("events") It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "send-events-" + string(util.NewUUID()) @@ -66,10 +67,10 @@ var _ = KubeDescribe("Events", func() { podClient.Delete(pod.Name, nil) }() if _, err := podClient.Create(pod); err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) @@ -80,21 +81,21 @@ var _ = KubeDescribe("Events", func() { By("retrieving the pod") podWithUid, err := podClient.Get(pod.Name) if err != nil { - Failf("Failed to get pod: %v", err) + framework.Failf("Failed to get pod: %v", err) } fmt.Printf("%+v\n", podWithUid) var events *api.EventList // Check for scheduler event about the pod. By("checking for scheduler event about the pod") - expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ "involvedObject.kind": "Pod", "involvedObject.uid": string(podWithUid.UID), - "involvedObject.namespace": framework.Namespace.Name, + "involvedObject.namespace": f.Namespace.Name, "source": api.DefaultSchedulerName, }.AsSelector() options := api.ListOptions{FieldSelector: selector} - events, err := framework.Client.Events(framework.Namespace.Name).List(options) + events, err := f.Client.Events(f.Namespace.Name).List(options) if err != nil { return false, err } @@ -106,15 +107,15 @@ var _ = KubeDescribe("Events", func() { })) // Check for kubelet event about the pod. By("checking for kubelet event about the pod") - expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { selector := fields.Set{ "involvedObject.uid": string(podWithUid.UID), "involvedObject.kind": "Pod", - "involvedObject.namespace": framework.Namespace.Name, + "involvedObject.namespace": f.Namespace.Name, "source": "kubelet", }.AsSelector() options := api.ListOptions{FieldSelector: selector} - events, err = framework.Client.Events(framework.Namespace.Name).List(options) + events, err = f.Client.Events(f.Namespace.Name).List(options) if err != nil { return false, err } diff --git a/test/e2e/example_cluster_dns.go b/test/e2e/example_cluster_dns.go index abb1ae4b45b..a94c6795e9b 100644 --- a/test/e2e/example_cluster_dns.go +++ b/test/e2e/example_cluster_dns.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -41,17 +42,17 @@ try: except: print 'err'` -var _ = KubeDescribe("ClusterDns [Feature:Example]", func() { - framework := NewDefaultFramework("cluster-dns") +var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { + f := framework.NewDefaultFramework("cluster-dns") var c *client.Client BeforeEach(func() { - c = framework.Client + c = f.Client }) It("should create pod that uses dns [Conformance]", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/cluster-dns", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file) } // contrary to the example, this test does not use contexts, for simplicity @@ -75,22 +76,22 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() { namespaces := []*api.Namespace{nil, nil} for i := range namespaces { var err error - namespaces[i], err = framework.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil) + namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil) Expect(err).NotTo(HaveOccurred()) } for _, ns := range namespaces { - runKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns)) + framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns)) } for _, ns := range namespaces { - runKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns)) + framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns)) } // wait for objects for _, ns := range namespaces { - waitForRCPodsRunning(c, ns.Name, backendRcName) - waitForService(c, ns.Name, backendSvcName, true, poll, serviceStartTimeout) + framework.WaitForRCPodsRunning(c, ns.Name, backendRcName) + framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout) } // it is not enough that pods are running because they may be set to running, but // the application itself may have not been initialized. Just query the application. 
@@ -99,11 +100,11 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() {
 options := api.ListOptions{LabelSelector: label}
 pods, err := c.Pods(ns.Name).List(options)
 Expect(err).NotTo(HaveOccurred())
- err = podsResponding(c, ns.Name, backendPodName, false, pods)
+ err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
 Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
- Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
+ framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
- err = serviceResponding(c, ns.Name, backendSvcName)
+ err = framework.ServiceResponding(c, ns.Name, backendSvcName)
 Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
 }
@@ -120,31 +121,31 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() {
 pods, err := c.Pods(namespaces[0].Name).List(options)
 if err != nil || pods == nil || len(pods.Items) == 0 {
- Failf("no running pods found")
+ framework.Failf("no running pods found")
 }
 podName := pods.Items[0].Name
 queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
- _, err = lookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
+ _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
 Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
 updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
 // create a pod in each namespace
 for _, ns := range namespaces {
- newKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).withStdinData(updatedPodYaml).execOrDie()
+ framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
 }
 // wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
 // that we cannot wait for the pods to be running because our pods terminate by themselves.
 for _, ns := range namespaces {
- err := waitForPodNotPending(c, ns.Name, frontendPodName)
- expectNoError(err)
+ err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
+ framework.ExpectNoError(err)
 }
 // wait for pods to print their result
 for _, ns := range namespaces {
- _, err := lookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, podStartTimeout)
+ _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
 Expect(err).NotTo(HaveOccurred())
 }
 })
diff --git a/test/e2e/example_k8petstore.go b/test/e2e/example_k8petstore.go
index 881479ec1f2..71c28389dc8 100644
--- a/test/e2e/example_k8petstore.go
+++ b/test/e2e/example_k8petstore.go
@@ -26,6 +26,7 @@ import (
 "time"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/test/e2e/framework"
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -33,7 +34,7 @@ import (
 const (
 k8bpsContainerVersion = "r.2.8.19" // Container version, see the examples/k8petstore dockerfiles for details.
 k8bpsThroughputDummy = "0" // Polling time = 0, since we poll in ginkgo rather than using the shell script tests.
 k8bpsRedisSlaves = "1" // Number of redis slaves.
k8bpsDontRunTest = "0" // Don't bother embedded test. k8bpsStartupTimeout = 30 * time.Second // Amount of elapsed time before petstore transactions are being stored. @@ -47,7 +48,7 @@ const ( // readTransactions reads # of transactions from the k8petstore web server endpoint. // for more details see the source of the k8petstore web server. func readTransactions(c *client.Client, ns string) (error, int) { - proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) if errProxy != nil { return errProxy, -1 } @@ -68,11 +69,11 @@ func readTransactions(c *client.Client, ns string) (error, int) { func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns string, finalTransactionsExpected int, maxTime time.Duration) { var err error = nil - k8bpsScriptLocation := filepath.Join(testContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh") + k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh") cmd := exec.Command( k8bpsScriptLocation, - testContext.KubectlPath, + framework.TestContext.KubectlPath, k8bpsContainerVersion, k8bpsThroughputDummy, strconv.Itoa(restServers), @@ -85,25 +86,25 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - Logf("Starting k8petstore application....") + framework.Logf("Starting k8petstore application....") // Run the k8petstore app, and log / fail if it returns any errors. // This should return quickly, assuming containers are downloaded. if err = cmd.Start(); err != nil { - Failf("%v", err) + framework.Failf("%v", err) } // Make sure there are no command errors. if err = cmd.Wait(); err != nil { if exiterr, ok := err.(*exec.ExitError); ok { if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - Logf("Exit Status: %d", status.ExitStatus()) + framework.Logf("Exit Status: %d", status.ExitStatus()) } } } Expect(err).NotTo(HaveOccurred()) - Logf("... Done starting k8petstore ") + framework.Logf("... Done starting k8petstore ") totalTransactions := 0 - Logf("Start polling, timeout is %v seconds", maxTime) + framework.Logf("Start polling, timeout is %v seconds", maxTime) // How long until the FIRST transactions are created. startupTimeout := time.After(time.Duration(k8bpsStartupTimeout)) @@ -113,18 +114,18 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str tick := time.Tick(2 * time.Second) var ready = false - Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout) + framework.Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout) T: for { select { case <-transactionsCompleteTimeout: - Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected) + framework.Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected) break T case <-tick: // Don't fail if there's an error. We expect a few failures might happen in the cloud. 
err, totalTransactions = readTransactions(c, ns) if err == nil { - Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions) + framework.Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions) if totalTransactions >= k8bpsMinTransactionsOnStartup { ready = true } @@ -133,14 +134,14 @@ T: } } else { if ready { - Logf("Blip: during polling: %v", err) + framework.Logf("Blip: during polling: %v", err) } else { - Logf("Not ready yet: %v", err) + framework.Logf("Not ready yet: %v", err) } } case <-startupTimeout: if !ready { - Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup) + framework.Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup) break T } } @@ -152,19 +153,19 @@ T: Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected)) } -var _ = KubeDescribe("Pet Store [Feature:Example]", func() { +var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() { BeforeEach(func() { // The shell scripts in k8petstore break on jenkins... Pure golang rewrite is in progress. - SkipUnlessProviderIs("local") + framework.SkipUnlessProviderIs("local") }) // The number of nodes dictates total number of generators/transaction expectations. var nodeCount int - f := NewDefaultFramework("petstore") + f := framework.NewDefaultFramework("petstore") It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() { - nodes := ListSchedulableNodesOrDie(f.Client) + nodes := framework.ListSchedulableNodesOrDie(f.Client) nodeCount = len(nodes.Items) loadGenerators := nodeCount diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 542e618d6bf..68f6d75626b 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -28,28 +28,29 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) const ( - serverStartTimeout = podStartTimeout + 3*time.Minute + serverStartTimeout = framework.PodStartTimeout + 3*time.Minute ) -var _ = KubeDescribe("[Feature:Example]", func() { - framework := NewDefaultFramework("examples") +var _ = framework.KubeDescribe("[Feature:Example]", func() { + f := framework.NewDefaultFramework("examples") var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name }) - KubeDescribe("Redis", func() { + framework.KubeDescribe("Redis", func() { It("should create and stop redis servers", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/redis", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/redis", file) } bootstrapYaml := mkpath("redis-master.yaml") sentinelServiceYaml := mkpath("redis-sentinel-service.yaml") @@ -64,35 +65,35 @@ var _ = KubeDescribe("[Feature:Example]", func() { expectedOnSentinel := "+monitor master" By("starting redis bootstrap") - runKubectlOrDie("create", "-f", bootstrapYaml, nsFlag) - err := waitForPodRunningInNamespace(c, bootstrapPodName, ns) + framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag) + err := framework.WaitForPodRunningInNamespace(c, bootstrapPodName, ns) Expect(err).NotTo(HaveOccurred()) - _, err = lookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout) + _, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - _, err = lookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout) + _, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout) Expect(err).NotTo(HaveOccurred()) By("setting up services and controllers") - runKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag) - runKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag) - runKubectlOrDie("create", "-f", controllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag) By("scaling up the deployment") - runKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag) - runKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag) + framework.RunKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag) + framework.RunKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag) By("checking up the services") checkAllLogs := func() { forEachPod(c, ns, "name", "redis", func(pod api.Pod) { if pod.Name != bootstrapPodName { - _, err := lookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout) Expect(err).NotTo(HaveOccurred()) } }) forEachPod(c, ns, "name", "redis-sentinel", func(pod api.Pod) { if pod.Name != bootstrapPodName { - _, err := lookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout) Expect(err).NotTo(HaveOccurred()) } }) @@ -100,18 +101,18 @@ var _ = KubeDescribe("[Feature:Example]", func() { checkAllLogs() By("turning down bootstrap") - runKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag) - err = 
waitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName) + framework.RunKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag) + err = framework.WaitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName) Expect(err).NotTo(HaveOccurred()) By("waiting for the new master election") checkAllLogs() }) }) - KubeDescribe("Celery-RabbitMQ", func() { + framework.KubeDescribe("Celery-RabbitMQ", func() { It("should create and stop celery+rabbitmq servers", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "celery-rabbitmq", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "celery-rabbitmq", file) } rabbitmqServiceYaml := mkpath("rabbitmq-service.yaml") rabbitmqControllerYaml := mkpath("rabbitmq-controller.yaml") @@ -121,40 +122,40 @@ var _ = KubeDescribe("[Feature:Example]", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) By("starting rabbitmq") - runKubectlOrDie("create", "-f", rabbitmqServiceYaml, nsFlag) - runKubectlOrDie("create", "-f", rabbitmqControllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", rabbitmqServiceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", rabbitmqControllerYaml, nsFlag) forEachPod(c, ns, "component", "rabbitmq", func(pod api.Pod) { - _, err := lookForStringInLog(ns, pod.Name, "rabbitmq", "Server startup complete", serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "rabbitmq", "Server startup complete", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) - err := waitForEndpoint(c, ns, "rabbitmq-service") + err := framework.WaitForEndpoint(c, ns, "rabbitmq-service") Expect(err).NotTo(HaveOccurred()) By("starting celery") - runKubectlOrDie("create", "-f", celeryControllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", celeryControllerYaml, nsFlag) forEachPod(c, ns, "component", "celery", func(pod api.Pod) { - _, err := lookForStringInFile(ns, pod.Name, "celery", "/data/celery.log", " ready.", serverStartTimeout) + _, err := framework.LookForStringInFile(ns, pod.Name, "celery", "/data/celery.log", " ready.", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) By("starting flower") - runKubectlOrDie("create", "-f", flowerServiceYaml, nsFlag) - runKubectlOrDie("create", "-f", flowerControllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", flowerServiceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", flowerControllerYaml, nsFlag) forEachPod(c, ns, "component", "flower", func(pod api.Pod) { // Do nothing. just wait for it to be up and running. }) - content, err := makeHttpRequestToService(c, ns, "flower-service", "/", endpointRegisterTimeout) + content, err := makeHttpRequestToService(c, ns, "flower-service", "/", framework.EndpointRegisterTimeout) Expect(err).NotTo(HaveOccurred()) if !strings.Contains(content, "Celery Flower") { - Failf("Flower HTTP request failed") + framework.Failf("Flower HTTP request failed") } }) }) - KubeDescribe("Spark", func() { + framework.KubeDescribe("Spark", func() { It("should start spark master, driver and workers", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "spark", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file) } // TODO: Add Zepplin and Web UI to this example. 
@@ -165,33 +166,33 @@ var _ = KubeDescribe("[Feature:Example]", func() { master := func() { By("starting master") - runKubectlOrDie("create", "-f", serviceYaml, nsFlag) - runKubectlOrDie("create", "-f", masterYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag) - Logf("Now polling for Master startup...") + framework.Logf("Now polling for Master startup...") // Only one master pod: But its a natural way to look up pod names. forEachPod(c, ns, "component", "spark-master", func(pod api.Pod) { - Logf("Now waiting for master to startup in %v", pod.Name) - _, err := lookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) + framework.Logf("Now waiting for master to startup in %v", pod.Name) + _, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) By("waiting for master endpoint") - err := waitForEndpoint(c, ns, "spark-master") + err := framework.WaitForEndpoint(c, ns, "spark-master") Expect(err).NotTo(HaveOccurred()) } worker := func() { By("starting workers") - Logf("Now starting Workers") - runKubectlOrDie("create", "-f", workerControllerYaml, nsFlag) + framework.Logf("Now starting Workers") + framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag) // For now, scaling is orthogonal to the core test. - // ScaleRC(c, ns, "spark-worker-controller", 2, true) + // framework.ScaleRC(c, ns, "spark-worker-controller", 2, true) - Logf("Now polling for worker startup...") + framework.Logf("Now polling for worker startup...") forEachPod(c, ns, "component", "spark-worker", func(pod api.Pod) { - _, err := lookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) } @@ -201,10 +202,10 @@ var _ = KubeDescribe("[Feature:Example]", func() { }) }) - KubeDescribe("Cassandra", func() { + framework.KubeDescribe("Cassandra", func() { It("should create and scale cassandra", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "cassandra", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "cassandra", file) } serviceYaml := mkpath("cassandra-service.yaml") podYaml := mkpath("cassandra.yaml") @@ -212,46 +213,46 @@ var _ = KubeDescribe("[Feature:Example]", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) By("Starting the cassandra service and pod") - runKubectlOrDie("create", "-f", serviceYaml, nsFlag) - runKubectlOrDie("create", "-f", podYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag) - Logf("waiting for first cassandra pod") - err := waitForPodRunningInNamespace(c, "cassandra", ns) + framework.Logf("waiting for first cassandra pod") + err := framework.WaitForPodRunningInNamespace(c, "cassandra", ns) Expect(err).NotTo(HaveOccurred()) - Logf("waiting for thrift listener online") - _, err = lookForStringInLog(ns, "cassandra", "cassandra", "Listening for thrift clients", serverStartTimeout) + framework.Logf("waiting for thrift listener online") + _, err = framework.LookForStringInLog(ns, "cassandra", "cassandra", "Listening for thrift clients", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - 
Logf("wait for service") - err = waitForEndpoint(c, ns, "cassandra") + framework.Logf("wait for service") + err = framework.WaitForEndpoint(c, ns, "cassandra") Expect(err).NotTo(HaveOccurred()) // Create an RC with n nodes in it. Each node will then be verified. By("Creating a Cassandra RC") - runKubectlOrDie("create", "-f", controllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag) forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) { - Logf("Verifying pod %v ", pod.Name) - _, err = lookForStringInLog(ns, pod.Name, "cassandra", "Listening for thrift clients", serverStartTimeout) + framework.Logf("Verifying pod %v ", pod.Name) + _, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Listening for thrift clients", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - _, err = lookForStringInLog(ns, pod.Name, "cassandra", "Handshaking version", serverStartTimeout) + _, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Handshaking version", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) By("Finding each node in the nodetool status lines") - output := runKubectlOrDie("exec", "cassandra", nsFlag, "--", "nodetool", "status") + output := framework.RunKubectlOrDie("exec", "cassandra", nsFlag, "--", "nodetool", "status") forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) { if !strings.Contains(output, pod.Status.PodIP) { - Failf("Pod ip %s not found in nodetool status", pod.Status.PodIP) + framework.Failf("Pod ip %s not found in nodetool status", pod.Status.PodIP) } }) }) }) - KubeDescribe("Storm", func() { + framework.KubeDescribe("Storm", func() { It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "storm", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "storm", file) } zookeeperServiceJson := mkpath("zookeeper-service.json") zookeeperPodJson := mkpath("zookeeper.json") @@ -262,28 +263,28 @@ var _ = KubeDescribe("[Feature:Example]", func() { zookeeperPod := "zookeeper" By("starting Zookeeper") - runKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag) - runKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag) - err := waitForPodRunningInNamespace(c, zookeeperPod, ns) + framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag) + err := framework.WaitForPodRunningInNamespace(c, zookeeperPod, ns) Expect(err).NotTo(HaveOccurred()) By("checking if zookeeper is up and running") - _, err = lookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout) + _, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - err = waitForEndpoint(c, ns, "zookeeper") + err = framework.WaitForEndpoint(c, ns, "zookeeper") Expect(err).NotTo(HaveOccurred()) By("starting Nimbus") - runKubectlOrDie("create", "-f", nimbusPodJson, nsFlag) - runKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag) - err = waitForPodRunningInNamespace(c, "nimbus", ns) + framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag) + err = framework.WaitForPodRunningInNamespace(c, "nimbus", ns) Expect(err).NotTo(HaveOccurred()) - err = waitForEndpoint(c, ns, "nimbus") + err = framework.WaitForEndpoint(c, ns, "nimbus") Expect(err).NotTo(HaveOccurred()) 
By("starting workers") - runKubectlOrDie("create", "-f", workerControllerJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", workerControllerJson, nsFlag) forEachPod(c, ns, "name", "storm-worker", func(pod api.Pod) { //do nothing, just wait for the pod to be running }) @@ -292,46 +293,46 @@ var _ = KubeDescribe("[Feature:Example]", func() { time.Sleep(20 * time.Second) By("checking if there are established connections to Zookeeper") - _, err = lookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout) + _, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) By("checking if Nimbus responds to requests") - lookForString("No topologies running.", time.Minute, func() string { - return runKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list") + framework.LookForString("No topologies running.", time.Minute, func() string { + return framework.RunKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list") }) }) }) - KubeDescribe("Liveness", func() { + framework.KubeDescribe("Liveness", func() { It("liveness pods should be automatically restarted", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "liveness", file) + return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "liveness", file) } execYaml := mkpath("exec-liveness.yaml") httpYaml := mkpath("http-liveness.yaml") nsFlag := fmt.Sprintf("--namespace=%v", ns) - runKubectlOrDie("create", "-f", execYaml, nsFlag) - runKubectlOrDie("create", "-f", httpYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", execYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", httpYaml, nsFlag) // Since both containers start rapidly, we can easily run this test in parallel. var wg sync.WaitGroup passed := true checkRestart := func(podName string, timeout time.Duration) { - err := waitForPodRunningInNamespace(c, podName, ns) + err := framework.WaitForPodRunningInNamespace(c, podName, ns) Expect(err).NotTo(HaveOccurred()) - for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { + for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { pod, err := c.Pods(ns).Get(podName) - expectNoError(err, fmt.Sprintf("getting pod %s", podName)) + framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) - Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) + framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) if stat.RestartCount > 0 { - Logf("Saw %v restart, succeeded...", podName) + framework.Logf("Saw %v restart, succeeded...", podName) wg.Done() return } } - Logf("Failed waiting for %v restart! ", podName) + framework.Logf("Failed waiting for %v restart! ", podName) passed = false wg.Done() } @@ -347,15 +348,15 @@ var _ = KubeDescribe("[Feature:Example]", func() { } wg.Wait() if !passed { - Failf("At least one liveness example failed. See the logs above.") + framework.Failf("At least one liveness example failed. 
See the logs above.") } }) }) - KubeDescribe("Secret", func() { + framework.KubeDescribe("Secret", func() { It("should create a pod that reads a secret", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "secrets", file) + return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "secrets", file) } secretYaml := mkpath("secret.yaml") podYaml := mkpath("secret-pod.yaml") @@ -363,43 +364,43 @@ var _ = KubeDescribe("[Feature:Example]", func() { podName := "secret-test-pod" By("creating secret and pod") - runKubectlOrDie("create", "-f", secretYaml, nsFlag) - runKubectlOrDie("create", "-f", podYaml, nsFlag) - err := waitForPodNoLongerRunningInNamespace(c, podName, ns) + framework.RunKubectlOrDie("create", "-f", secretYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag) + err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) Expect(err).NotTo(HaveOccurred()) By("checking if secret was read correctly") - _, err = lookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) + _, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) }) - KubeDescribe("Downward API", func() { + framework.KubeDescribe("Downward API", func() { It("should create a pod that prints his name and namespace", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "downward-api", file) + return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "downward-api", file) } podYaml := mkpath("dapi-pod.yaml") nsFlag := fmt.Sprintf("--namespace=%v", ns) podName := "dapi-test-pod" By("creating the pod") - runKubectlOrDie("create", "-f", podYaml, nsFlag) - err := waitForPodNoLongerRunningInNamespace(c, podName, ns) + framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag) + err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) Expect(err).NotTo(HaveOccurred()) By("checking if name and namespace were passed correctly") - _, err = lookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) + _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - _, err = lookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) + _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) }) - KubeDescribe("RethinkDB", func() { + framework.KubeDescribe("RethinkDB", func() { It("should create and stop rethinkdb servers", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "rethinkdb", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "rethinkdb", file) } driverServiceYaml := mkpath("driver-service.yaml") rethinkDbControllerYaml := mkpath("rc.yaml") @@ -408,62 +409,62 @@ var _ = KubeDescribe("[Feature:Example]", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) By("starting rethinkdb") - runKubectlOrDie("create", "-f", driverServiceYaml, nsFlag) - runKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", driverServiceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", 
rethinkDbControllerYaml, nsFlag) checkDbInstances := func() { forEachPod(c, ns, "db", "rethinkdb", func(pod api.Pod) { - _, err := lookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) } checkDbInstances() - err := waitForEndpoint(c, ns, "rethinkdb-driver") + err := framework.WaitForEndpoint(c, ns, "rethinkdb-driver") Expect(err).NotTo(HaveOccurred()) By("scaling rethinkdb") - ScaleRC(c, ns, "rethinkdb-rc", 2, true) + framework.ScaleRC(c, ns, "rethinkdb-rc", 2, true) checkDbInstances() By("starting admin") - runKubectlOrDie("create", "-f", adminServiceYaml, nsFlag) - runKubectlOrDie("create", "-f", adminPodYaml, nsFlag) - err = waitForPodRunningInNamespace(c, "rethinkdb-admin", ns) + framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag) + err = framework.WaitForPodRunningInNamespace(c, "rethinkdb-admin", ns) Expect(err).NotTo(HaveOccurred()) checkDbInstances() - content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", endpointRegisterTimeout) + content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout) Expect(err).NotTo(HaveOccurred()) if !strings.Contains(content, "RethinkDB Administration Console") { - Failf("RethinkDB console is not running") + framework.Failf("RethinkDB console is not running") } }) }) - KubeDescribe("Hazelcast", func() { + framework.KubeDescribe("Hazelcast", func() { It("should create and scale hazelcast", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples", "hazelcast", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples", "hazelcast", file) } serviceYaml := mkpath("hazelcast-service.yaml") controllerYaml := mkpath("hazelcast-controller.yaml") nsFlag := fmt.Sprintf("--namespace=%v", ns) By("starting hazelcast") - runKubectlOrDie("create", "-f", serviceYaml, nsFlag) - runKubectlOrDie("create", "-f", controllerYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag) forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) { - _, err := lookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) - _, err = lookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout) + _, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) - err := waitForEndpoint(c, ns, "hazelcast") + err := framework.WaitForEndpoint(c, ns, "hazelcast") Expect(err).NotTo(HaveOccurred()) By("scaling hazelcast") - ScaleRC(c, ns, "hazelcast", 2, true) + framework.ScaleRC(c, ns, "hazelcast", 2, true) forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) { - _, err := lookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) }) }) @@ -473,8 +474,8 @@ var _ = KubeDescribe("[Feature:Example]", func() { func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) { var result []byte 
var err error - for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { - proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) + for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) if errProxy != nil { break } @@ -503,7 +504,7 @@ func prepareResourceWithReplacedString(inputFile, old, new string) string { func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) { pods := []*api.Pod{} - for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { + for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { selector := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := api.ListOptions{LabelSelector: selector} podList, err := c.Pods(ns).List(options) @@ -518,10 +519,10 @@ func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func } } if pods == nil || len(pods) == 0 { - Failf("No pods found") + framework.Failf("No pods found") } for _, pod := range pods { - err := waitForPodRunningInNamespace(c, pod.Name, ns) + err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns) Expect(err).NotTo(HaveOccurred()) fn(*pod) } diff --git a/test/e2e/expansion.go b/test/e2e/expansion.go index 1eecb3f9b59..f0a5f44cb90 100644 --- a/test/e2e/expansion.go +++ b/test/e2e/expansion.go @@ -19,14 +19,15 @@ package e2e import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) // These tests exercise the Kubernetes expansion syntax $(VAR). // For more information, see: docs/design/expansion.md -var _ = KubeDescribe("Variable Expansion", func() { - framework := NewDefaultFramework("var-expansion") +var _ = framework.KubeDescribe("Variable Expansion", func() { + f := framework.NewDefaultFramework("var-expansion") It("should allow composing env vars into new env vars [Conformance]", func() { podName := "var-expansion-" + string(util.NewUUID()) @@ -61,7 +62,7 @@ var _ = KubeDescribe("Variable Expansion", func() { }, } - framework.TestContainerOutput("env composition", pod, 0, []string{ + f.TestContainerOutput("env composition", pod, 0, []string{ "FOO=foo-value", "BAR=bar-value", "FOOBAR=foo-value;;bar-value", @@ -93,7 +94,7 @@ var _ = KubeDescribe("Variable Expansion", func() { }, } - framework.TestContainerOutput("substitution in container's command", pod, 0, []string{ + f.TestContainerOutput("substitution in container's command", pod, 0, []string{ "test-value", }) }) @@ -124,7 +125,7 @@ var _ = KubeDescribe("Variable Expansion", func() { }, } - framework.TestContainerOutput("substitution in container's args", pod, 0, []string{ + f.TestContainerOutput("substitution in container's args", pod, 0, []string{ "test-value", }) }) diff --git a/test/e2e/framework/cleanup.go b/test/e2e/framework/cleanup.go index 7d724850053..deffc4e49aa 100644 --- a/test/e2e/framework/cleanup.go +++ b/test/e2e/framework/cleanup.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
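Note: every test file touched above follows the same migration pattern: import "k8s.io/kubernetes/test/e2e/framework", replace the old package-local helpers (Logf, Failf, expectNoError, runKubectlOrDie, NewDefaultFramework, ...) with their exported framework equivalents, and rename the local variable formerly called "framework" to "f" so it no longer collides with the imported package name. The sketch below shows the shape a migrated test ends up with; the file name, test description, and pod spec are illustrative only and are not part of this patch, and it uses only identifiers that appear in the hunks in this change.

// migrated_test_sketch.go (illustrative sketch, not part of this patch)
package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Migration sketch", func() {
	// "f" replaces the old local variable named "framework", which would now
	// shadow the imported framework package.
	f := framework.NewDefaultFramework("migration-sketch")

	It("should use the exported framework helpers", func() {
		podClient := f.Client.Pods(f.Namespace.Name)

		By("creating a pod in the framework-managed namespace")
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "migration-sketch-pod"},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "pause", Image: "gcr.io/google_containers/pause:2.0"},
				},
			},
		}
		if _, err := podClient.Create(pod); err != nil {
			// Failf and Logf are now exported from the framework package.
			framework.Failf("Failed to create pod: %v", err)
		}
		defer func() {
			if err := podClient.Delete(pod.Name, nil); err != nil {
				framework.Logf("Failed to delete pod %s: %v", pod.Name, err)
			}
		}()

		By("waiting for the pod to run")
		err := f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	})
})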
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index ea2c2f00c97..7238c51fef7 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -56,15 +56,15 @@ type Framework struct { // Constraints that passed to a check which is executed after data is gathered to // see if 99% of results are within acceptable bounds. It as to be injected in the test, // as expectations vary greatly. Constraints are groupped by the container names. - addonResourceConstraints map[string]resourceConstraint + AddonResourceConstraints map[string]ResourceConstraint logsSizeWaitGroup sync.WaitGroup logsSizeCloseChannel chan bool logsSizeVerifier *LogsSizeVerifier // To make sure that this framework cleans up after itself, no matter what, - // we install a cleanup action before each test and clear it after. If we - // should abort, the AfterSuite hook should run all cleanup actions. + // we install a Cleanup action before each test and clear it after. If we + // should abort, the AfterSuite hook should run all Cleanup actions. cleanupHandle CleanupActionHandle // configuration for framework's client @@ -77,16 +77,16 @@ type TestDataSummary interface { } type FrameworkOptions struct { - clientQPS float32 - clientBurst int + ClientQPS float32 + ClientBurst int } // NewFramework makes a new framework and sets up a BeforeEach/AfterEach for // you (you can write additional before/after each functions). func NewDefaultFramework(baseName string) *Framework { options := FrameworkOptions{ - clientQPS: 20, - clientBurst: 50, + ClientQPS: 20, + ClientBurst: 50, } return NewFramework(baseName, options) } @@ -94,27 +94,27 @@ func NewDefaultFramework(baseName string) *Framework { func NewFramework(baseName string, options FrameworkOptions) *Framework { f := &Framework{ BaseName: baseName, - addonResourceConstraints: make(map[string]resourceConstraint), + AddonResourceConstraints: make(map[string]ResourceConstraint), options: options, } - BeforeEach(f.beforeEach) - AfterEach(f.afterEach) + BeforeEach(f.BeforeEach) + AfterEach(f.AfterEach) return f } -// beforeEach gets a client and makes a namespace. -func (f *Framework) beforeEach() { +// BeforeEach gets a client and makes a namespace. +func (f *Framework) BeforeEach() { // The fact that we need this feels like a bug in ginkgo. 
// https://github.com/onsi/ginkgo/issues/222 - f.cleanupHandle = AddCleanupAction(f.afterEach) + f.cleanupHandle = AddCleanupAction(f.AfterEach) By("Creating a kubernetes client") - config, err := loadConfig() + config, err := LoadConfig() Expect(err).NotTo(HaveOccurred()) - config.QPS = f.options.clientQPS - config.Burst = f.options.clientBurst + config.QPS = f.options.ClientQPS + config.Burst = f.options.ClientBurst c, err := loadClientFromConfig(config) Expect(err).NotTo(HaveOccurred()) @@ -129,15 +129,15 @@ func (f *Framework) beforeEach() { f.Namespace = namespace - if testContext.VerifyServiceAccount { + if TestContext.VerifyServiceAccount { By("Waiting for a default service account to be provisioned in namespace") - err = waitForDefaultServiceAccountInNamespace(c, namespace.Name) + err = WaitForDefaultServiceAccountInNamespace(c, namespace.Name) Expect(err).NotTo(HaveOccurred()) } else { Logf("Skipping waiting for service account") } - if testContext.GatherKubeSystemResourceUsageData { + if TestContext.GatherKubeSystemResourceUsageData { f.gatherer, err = NewResourceUsageGatherer(c) if err != nil { Logf("Error while creating NewResourceUsageGatherer: %v", err) @@ -146,7 +146,7 @@ func (f *Framework) beforeEach() { } } - if testContext.GatherLogsSizes { + if TestContext.GatherLogsSizes { f.logsSizeWaitGroup = sync.WaitGroup{} f.logsSizeWaitGroup.Add(1) f.logsSizeCloseChannel = make(chan bool) @@ -158,14 +158,14 @@ func (f *Framework) beforeEach() { } } -// afterEach deletes the namespace, after reading its events. -func (f *Framework) afterEach() { +// AfterEach deletes the namespace, after reading its events. +func (f *Framework) AfterEach() { RemoveCleanupAction(f.cleanupHandle) // DeleteNamespace at the very end in defer, to avoid any // expectation failures preventing deleting the namespace. defer func() { - if testContext.DeleteNamespace { + if TestContext.DeleteNamespace { for _, ns := range f.namespacesToDelete { By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) @@ -193,23 +193,23 @@ func (f *Framework) afterEach() { // Print events if the test failed. if CurrentGinkgoTestDescription().Failed { - dumpAllNamespaceInfo(f.Client, f.Namespace.Name) + DumpAllNamespaceInfo(f.Client, f.Namespace.Name) } summaries := make([]TestDataSummary, 0) - if testContext.GatherKubeSystemResourceUsageData && f.gatherer != nil { + if TestContext.GatherKubeSystemResourceUsageData && f.gatherer != nil { By("Collecting resource usage data") - summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.addonResourceConstraints)) + summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)) } - if testContext.GatherLogsSizes { + if TestContext.GatherLogsSizes { By("Gathering log sizes data") close(f.logsSizeCloseChannel) f.logsSizeWaitGroup.Wait() summaries = append(summaries, f.logsSizeVerifier.GetSummary()) } - if testContext.GatherMetricsAfterTest { + if TestContext.GatherMetricsAfterTest { By("Gathering metrics") // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. 
grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true) @@ -225,7 +225,7 @@ func (f *Framework) afterEach() { } } - outputTypes := strings.Split(testContext.OutputPrintType, ",") + outputTypes := strings.Split(TestContext.OutputPrintType, ",") for _, printType := range outputTypes { switch printType { case "hr": @@ -246,13 +246,13 @@ func (f *Framework) afterEach() { // Check whether all nodes are ready after the test. // This is explicitly done at the very end of the test, to avoid // e.g. not removing namespace in case of this failure. - if err := allNodesReady(f.Client, time.Minute); err != nil { + if err := AllNodesReady(f.Client, time.Minute); err != nil { Failf("All nodes should be ready after test, %v", err) } } func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) { - createTestingNS := testContext.CreateTestingNS + createTestingNS := TestContext.CreateTestingNS if createTestingNS == nil { createTestingNS = CreateTestingNS } @@ -270,12 +270,12 @@ func (f *Framework) WaitForPodTerminated(podName, reason string) error { // WaitForPodRunning waits for the pod to run in the namespace. func (f *Framework) WaitForPodRunning(podName string) error { - return waitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name) + return WaitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name) } // WaitForPodReady waits for the pod to flip to ready in the namespace. func (f *Framework) WaitForPodReady(podName string) error { - return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, podStartTimeout) + return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, PodStartTimeout) } // WaitForPodRunningSlow waits for the pod to run in the namespace. @@ -287,12 +287,12 @@ func (f *Framework) WaitForPodRunningSlow(podName string) error { // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either // success or failure. func (f *Framework) WaitForPodNoLongerRunning(podName string) error { - return waitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name) + return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name) } // Runs the given pod and verifies that the output of exact container matches the desired output. func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { - testContainerOutput(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) + TestContainerOutput(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) } // Runs the given pod and verifies that the output of exact container matches the desired regexps. @@ -406,7 +406,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string } cmdArgs = append(cmdArgs, args...) - cmd := kubectlCmd(cmdArgs...) + cmd := KubectlCmd(cmdArgs...) 
cmd.Stdout, cmd.Stderr = &stdout, &stderr Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args, " ")) diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index bf768854dfe..0be0539eda7 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -156,7 +156,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats if err != nil { return nil, err } - subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) if err != nil { return nil, err } @@ -207,7 +207,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats // polls every second, we'd need to get N stats points for N-second interval. // Note that this is an approximation and may not be accurate, hence we also // write the actual interval used for calculation (based on the timestamps of -// the stats points in containerResourceUsage.CPUInterval. +// the stats points in ContainerResourceUsage.CPUInterval. // // containerNames is a function returning a collection of container names in which // user is interested in. ExpectMissingContainers is a flag which says if the test @@ -222,7 +222,7 @@ func getOneTimeResourceUsageOnNode( cpuInterval time.Duration, containerNames func() []string, expectMissingContainers bool, -) (resourceUsagePerContainer, error) { +) (ResourceUsagePerContainer, error) { const ( // cadvisor records stats about every second. cadvisorStatsPollingIntervalInSeconds float64 = 1.0 @@ -244,8 +244,8 @@ func getOneTimeResourceUsageOnNode( return nil, err } - f := func(name string, oldStats, newStats *cadvisorapi.ContainerStats) *containerResourceUsage { - return &containerResourceUsage{ + f := func(name string, oldStats, newStats *cadvisorapi.ContainerStats) *ContainerResourceUsage { + return &ContainerResourceUsage{ Name: name, Timestamp: newStats.Timestamp, CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()), @@ -257,7 +257,7 @@ func getOneTimeResourceUsageOnNode( } // Process container infos that are relevant to us. containers := containerNames() - usageMap := make(resourceUsagePerContainer, len(containers)) + usageMap := make(ResourceUsagePerContainer, len(containers)) for _, name := range containers { info, ok := containerInfos[name] if !ok { @@ -274,7 +274,7 @@ func getOneTimeResourceUsageOnNode( } func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) { - subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) if err != nil { return nil, err } @@ -331,7 +331,7 @@ const ( ) // A list of containers for which we want to collect resource usage. 
-func targetContainers() []string { +func TargetContainers() []string { return []string{ rootContainerName, stats.SystemContainerRuntime, @@ -340,7 +340,7 @@ func targetContainers() []string { } } -type containerResourceUsage struct { +type ContainerResourceUsage struct { Name string Timestamp time.Time CPUUsageInCores float64 @@ -351,14 +351,14 @@ type containerResourceUsage struct { CPUInterval time.Duration } -func (r *containerResourceUsage) isStrictlyGreaterThan(rhs *containerResourceUsage) bool { +func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool { return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes } -type resourceUsagePerContainer map[string]*containerResourceUsage -type resourceUsagePerNode map[string]resourceUsagePerContainer +type ResourceUsagePerContainer map[string]*ContainerResourceUsage +type ResourceUsagePerNode map[string]ResourceUsagePerContainer -func formatResourceUsageStats(nodeName string, containerStats resourceUsagePerContainer) string { +func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string { // Example output: // // Resource usage for node "e2e-test-foo-minion-abcde": @@ -417,7 +417,7 @@ func getKubeletMetricsThroughNode(nodeName string) (string, error) { return string(body), nil } -func getKubeletHeapStats(c *client.Client, nodeName string) (string, error) { +func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) { client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap") if err != nil { return "", err @@ -448,8 +448,8 @@ func PrintAllKubeletPods(c *client.Client, nodeName string) { } } -func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *containerResourceUsage { - return &containerResourceUsage{ +func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *ContainerResourceUsage { + return &ContainerResourceUsage{ Name: name, Timestamp: newStats.CPU.Time.Time, CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()), @@ -468,13 +468,13 @@ type resourceCollector struct { node string containers []string client *client.Client - buffers map[string][]*containerResourceUsage + buffers map[string][]*ContainerResourceUsage pollingInterval time.Duration stopCh chan struct{} } func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector { - buffers := make(map[string][]*containerResourceUsage) + buffers := make(map[string][]*ContainerResourceUsage) return &resourceCollector{ node: nodeName, containers: containerNames, @@ -484,7 +484,7 @@ func newResourceCollector(c *client.Client, nodeName string, containerNames []st } } -// Start starts a goroutine to poll the node every pollingInterval. +// Start starts a goroutine to Poll the node every pollingInterval. func (r *resourceCollector) Start() { r.stopCh = make(chan struct{}, 1) // Keep the last observed stats for comparison. 
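As a quick check of the rate computation used for CPUUsageInCores above (the delta of the cumulative CPU counter divided by the elapsed wall-clock time, both in nanoseconds), a standalone sketch with made-up sample values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Two hypothetical samples of a container's cumulative CPU time, in nanoseconds.
	oldTotal := uint64(10000000000) // 10s of CPU consumed at the first sample
	newTotal := uint64(10500000000) // 10.5s of CPU consumed at the second sample
	elapsed := 5 * time.Second      // wall-clock time between the samples

	// Same arithmetic as ContainerResourceUsage.CPUUsageInCores.
	cores := float64(newTotal-oldTotal) / float64(elapsed.Nanoseconds())
	fmt.Printf("%.2f cores\n", cores) // prints "0.10 cores"
}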
@@ -527,10 +527,10 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.Container } } -func (r *resourceCollector) GetLatest() (resourceUsagePerContainer, error) { +func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) { r.lock.RLock() defer r.lock.RUnlock() - stats := make(resourceUsagePerContainer) + stats := make(ResourceUsagePerContainer) for _, name := range r.containers { contStats, ok := r.buffers[name] if !ok || len(contStats) == 0 { @@ -546,11 +546,11 @@ func (r *resourceCollector) Reset() { r.lock.Lock() defer r.lock.Unlock() for _, name := range r.containers { - r.buffers[name] = []*containerResourceUsage{} + r.buffers[name] = []*ContainerResourceUsage{} } } -type resourceUsageByCPU []*containerResourceUsage +type resourceUsageByCPU []*ContainerResourceUsage func (r resourceUsageByCPU) Len() int { return len(r) } func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] } @@ -579,27 +579,27 @@ func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]f return result } -// resourceMonitor manages a resourceCollector per node. -type resourceMonitor struct { +// ResourceMonitor manages a resourceCollector per node. +type ResourceMonitor struct { client *client.Client containers []string pollingInterval time.Duration collectors map[string]*resourceCollector } -func newResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *resourceMonitor { - return &resourceMonitor{ +func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *ResourceMonitor { + return &ResourceMonitor{ containers: containerNames, client: c, pollingInterval: pollingInterval, } } -func (r *resourceMonitor) Start() { +func (r *ResourceMonitor) Start() { // It should be OK to monitor unschedulable Nodes nodes, err := r.client.Nodes().List(api.ListOptions{}) if err != nil { - Failf("resourceMonitor: unable to get list of nodes: %v", err) + Failf("ResourceMonitor: unable to get list of nodes: %v", err) } r.collectors = make(map[string]*resourceCollector, 0) for _, node := range nodes.Items { @@ -609,19 +609,19 @@ func (r *resourceMonitor) Start() { } } -func (r *resourceMonitor) Stop() { +func (r *ResourceMonitor) Stop() { for _, collector := range r.collectors { collector.Stop() } } -func (r *resourceMonitor) Reset() { +func (r *ResourceMonitor) Reset() { for _, collector := range r.collectors { collector.Reset() } } -func (r *resourceMonitor) LogLatest() { +func (r *ResourceMonitor) LogLatest() { summary, err := r.GetLatest() if err != nil { Logf("%v", err) @@ -629,7 +629,7 @@ func (r *resourceMonitor) LogLatest() { Logf("%s", r.FormatResourceUsage(summary)) } -func (r *resourceMonitor) FormatResourceUsage(s resourceUsagePerNode) string { +func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string { summary := []string{} for node, usage := range s { summary = append(summary, formatResourceUsageStats(node, usage)) @@ -637,8 +637,8 @@ func (r *resourceMonitor) FormatResourceUsage(s resourceUsagePerNode) string { return strings.Join(summary, "\n") } -func (r *resourceMonitor) GetLatest() (resourceUsagePerNode, error) { - result := make(resourceUsagePerNode) +func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) { + result := make(ResourceUsagePerNode) errs := []error{} for key, collector := range r.collectors { s, err := collector.GetLatest() @@ -651,15 +651,15 @@ func (r *resourceMonitor) GetLatest() (resourceUsagePerNode, error) { return 
result, utilerrors.NewAggregate(errs) } -// containersCPUSummary is indexed by the container name with each entry a +// ContainersCPUSummary is indexed by the container name with each entry a // (percentile, value) map. -type containersCPUSummary map[string]map[float64]float64 +type ContainersCPUSummary map[string]map[float64]float64 -// nodesCPUSummary is indexed by the node name with each entry a -// containersCPUSummary map. -type nodesCPUSummary map[string]containersCPUSummary +// NodesCPUSummary is indexed by the node name with each entry a +// ContainersCPUSummary map. +type NodesCPUSummary map[string]ContainersCPUSummary -func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string { +func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string { // Example output for a node (the percentiles may differ): // CPU usage of containers on node "e2e-test-foo-minion-0vj7": // container 5th% 50th% 90th% 95th% @@ -677,7 +677,7 @@ func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string { buf := &bytes.Buffer{} w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0) fmt.Fprintf(w, "%s\n", strings.Join(header, "\t")) - for _, containerName := range targetContainers() { + for _, containerName := range TargetContainers() { var s []string s = append(s, fmt.Sprintf("%q", containerName)) data, ok := containers[containerName] @@ -696,16 +696,16 @@ func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string { return strings.Join(summaryStrings, "\n") } -func (r *resourceMonitor) LogCPUSummary() { +func (r *ResourceMonitor) LogCPUSummary() { summary := r.GetCPUSummary() Logf("%s", r.FormatCPUSummary(summary)) } -func (r *resourceMonitor) GetCPUSummary() nodesCPUSummary { - result := make(nodesCPUSummary) +func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary { + result := make(NodesCPUSummary) for nodeName, collector := range r.collectors { - result[nodeName] = make(containersCPUSummary) - for _, containerName := range targetContainers() { + result[nodeName] = make(ContainersCPUSummary) + for _, containerName := range TargetContainers() { data := collector.GetBasicCPUStats(containerName) result[nodeName][containerName] = data } diff --git a/test/e2e/framework/log_size_monitoring.go b/test/e2e/framework/log_size_monitoring.go index 8c9394ec30d..2ab7457e31d 100644 --- a/test/e2e/framework/log_size_monitoring.go +++ b/test/e2e/framework/log_size_monitoring.go @@ -101,7 +101,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string { } func (s *LogsSizeDataSummary) PrintJSON() string { - return prettyPrintJSON(*s) + return PrettyPrintJSON(*s) } type LogsSizeData struct { @@ -144,8 +144,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier { nodeAddresses, err := NodeSSHHosts(c) - expectNoError(err) - masterAddress := getMasterHost() + ":22" + ExpectNoError(err) + masterAddress := GetMasterHost() + ":22" workChannel := make(chan WorkItem, len(nodeAddresses)+1) workers := make([]*LogSizeGatherer, workersNo) @@ -241,7 +241,7 @@ func (g *LogSizeGatherer) Work() bool { sshResult, err := SSH( fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")), workItem.ip, - testContext.Provider, + TestContext.Provider, ) if err != nil { Logf("Error while trying to SSH to %v, skipping probe. 
Error: %v", workItem.ip, err) diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index a83c7483edf..189028eaecd 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -91,7 +91,7 @@ func (m *MetricsForE2E) PrintHumanReadable() string { func (m *MetricsForE2E) PrintJSON() string { m.filterMetrics() - return prettyPrintJSON(*m) + return PrettyPrintJSON(*m) } var InterestingApiServerMetrics = []string{ @@ -287,7 +287,7 @@ func HighLatencyRequests(c *client.Client) (int, error) { } } - Logf("API calls latencies: %s", prettyPrintJSON(metrics)) + Logf("API calls latencies: %s", PrettyPrintJSON(metrics)) return badMetrics, nil } @@ -295,7 +295,7 @@ func HighLatencyRequests(c *client.Client) (int, error) { // Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are // within the threshold. func VerifyPodStartupLatency(latency PodStartupLatency) error { - Logf("Pod startup latency: %s", prettyPrintJSON(latency)) + Logf("Pod startup latency: %s", PrettyPrintJSON(latency)) if latency.Latency.Perc50 > podStartupThreshold { return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50) @@ -310,9 +310,9 @@ func VerifyPodStartupLatency(latency PodStartupLatency) error { } // Resets latency metrics in apiserver. -func resetMetrics(c *client.Client) error { +func ResetMetrics(c *client.Client) error { Logf("Resetting latency metrics in apiserver...") body, err := c.Get().AbsPath("/resetMetrics").DoRaw() if err != nil { return err } @@ -337,7 +337,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) { // Check if master Node is registered nodes, err := c.Nodes().List(api.ListOptions{}) - expectNoError(err) + ExpectNoError(err) var data string var masterRegistered = false @@ -351,16 +351,16 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) { Prefix("proxy"). Namespace(api.NamespaceSystem). Resource("pods"). - Name(fmt.Sprintf("kube-scheduler-%v:%v", testContext.CloudConfig.MasterName, ports.SchedulerPort)). + Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)). Suffix("metrics"). Do().Raw() - expectNoError(err) + ExpectNoError(err) data = string(rawData) } else { // If master is not registered fall back to old method of using SSH. cmd := "curl http://localhost:10251/metrics" - sshResult, err := SSH(cmd, getMasterHost()+":22", testContext.Provider) + sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) if err != nil || sshResult.Code != 0 { return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err) } @@ -401,13 +401,13 @@ func VerifySchedulerLatency(c *client.Client) error { if err != nil { return err } - Logf("Scheduling latency: %s", prettyPrintJSON(latency)) + Logf("Scheduling latency: %s", PrettyPrintJSON(latency)) // TODO: Add some reasonable checks once we know more about the values. return nil } -func prettyPrintJSON(metrics interface{}) string { +func PrettyPrintJSON(metrics interface{}) string { output := &bytes.Buffer{} if err := json.NewEncoder(output).Encode(metrics); err != nil { Logf("Error building encoder: %v", err) @@ -446,8 +446,8 @@ func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) { } } -// podLatencyData encapsulates pod startup latency information.
-type podLatencyData struct { +// PodLatencyData encapsulates pod startup latency information. +type PodLatencyData struct { // Name of the pod Name string // Node this pod was running on @@ -456,13 +456,13 @@ type podLatencyData struct { Latency time.Duration } -type latencySlice []podLatencyData +type LatencySlice []PodLatencyData -func (a latencySlice) Len() int { return len(a) } -func (a latencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a latencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency } +func (a LatencySlice) Len() int { return len(a) } +func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency } -func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric { +func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric { length := len(latencies) perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency @@ -470,9 +470,9 @@ func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric { return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99} } -// logSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times +// LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times // If latencyDataLag is nil then it will be populated from latencyData -func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLatencyData, nodeCount int, c *client.Client) { +func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c *client.Client) { if latencyDataLag == nil { latencyDataLag = latencyData } @@ -489,15 +489,15 @@ func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLate // the given time.Duration. Since the arrays are sorted we are looking at the last // element which will always be the highest. If the latency is higher than the max Failf // is called. -func testMaximumLatencyValue(latencies []podLatencyData, max time.Duration, name string) { +func testMaximumLatencyValue(latencies []PodLatencyData, max time.Duration, name string) { highestLatency := latencies[len(latencies)-1] if !(highestLatency.Latency <= max) { Failf("%s were not all under %s: %#v", name, max.String(), latencies) } } -func printLatencies(latencies []podLatencyData, header string) { - metrics := extractLatencyMetrics(latencies) +func PrintLatencies(latencies []PodLatencyData, header string) { + metrics := ExtractLatencyMetrics(latencies) Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:]) Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99) } diff --git a/test/e2e/framework/prompush.go b/test/e2e/framework/prompush.go index 8f3a036718e..246387a0a2b 100644 --- a/test/e2e/framework/prompush.go +++ b/test/e2e/framework/prompush.go @@ -39,11 +39,11 @@ var prom_registered = false // Reusable function for pushing metrics to prometheus. Handles initialization and so on. 
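A short usage sketch (not from this patch) for the exported latency helpers; the pod names and durations are invented, and the usual sort, time and framework imports are assumed. ExtractLatencyMetrics indexes into the slice by percentile, so callers sort it first via LatencySlice.

latencies := []framework.PodLatencyData{
	{Name: "pod-a", Node: "node-1", Latency: 2 * time.Second},
	{Name: "pod-b", Node: "node-2", Latency: 5 * time.Second},
	{Name: "pod-c", Node: "node-1", Latency: 1 * time.Second},
}
sort.Sort(framework.LatencySlice(latencies))

framework.PrintLatencies(latencies, "pod startup latencies") // logs the slowest 10% and the percentiles
m := framework.ExtractLatencyMetrics(latencies)
framework.Logf("perc50=%v perc90=%v perc99=%v", m.Perc50, m.Perc90, m.Perc99)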
func promPushRunningPending(running, pending int) error { - if testContext.PrometheusPushGateway == "" { + if TestContext.PrometheusPushGateway == "" { return nil } else { // Register metrics if necessary - if !prom_registered && testContext.PrometheusPushGateway != "" { + if !prom_registered && TestContext.PrometheusPushGateway != "" { prometheus.Register(runningMetric) prometheus.Register(pendingMetric) prom_registered = true @@ -57,7 +57,7 @@ func promPushRunningPending(running, pending int) error { if err := prometheus.Push( "e2e", "none", - testContext.PrometheusPushGateway, //i.e. "127.0.0.1:9091" + TestContext.PrometheusPushGateway, //i.e. "127.0.0.1:9091" ); err != nil { fmt.Println("failed at pushing to pushgateway ", err) return err diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go index c4a6ead5332..d8365c0458e 100644 --- a/test/e2e/framework/resource_usage_gatherer.go +++ b/test/e2e/framework/resource_usage_gatherer.go @@ -38,9 +38,9 @@ const ( probeDuration = 15 * time.Second ) -type resourceConstraint struct { - cpuConstraint float64 - memoryConstraint uint64 +type ResourceConstraint struct { + CPUConstraint float64 + MemoryConstraint uint64 } type SingleContainerSummary struct { @@ -67,12 +67,12 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string { } func (s *ResourceUsageSummary) PrintJSON() string { - return prettyPrintJSON(*s) + return PrettyPrintJSON(*s) } -func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCompute []int) map[int]resourceUsagePerContainer { +func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer { if len(timeSeries) == 0 { - return make(map[int]resourceUsagePerContainer) + return make(map[int]ResourceUsagePerContainer) } dataMap := make(map[string]*usageDataPerContainer) for i := range timeSeries { @@ -95,12 +95,12 @@ func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCom sort.Sort(uint64arr(v.memWorkSetData)) } - result := make(map[int]resourceUsagePerContainer) + result := make(map[int]ResourceUsagePerContainer) for _, perc := range percentilesToCompute { - data := make(resourceUsagePerContainer) + data := make(ResourceUsagePerContainer) for k, v := range dataMap { percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1 - data[k] = &containerResourceUsage{ + data[k] = &ContainerResourceUsage{ Name: k, CPUUsageInCores: v.cpuData[percentileIndex], MemoryUsageInBytes: v.memUseData[percentileIndex], @@ -112,8 +112,8 @@ func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCom return result } -func leftMergeData(left, right map[int]resourceUsagePerContainer) map[int]resourceUsagePerContainer { - result := make(map[int]resourceUsagePerContainer) +func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer { + result := make(map[int]ResourceUsagePerContainer) for percentile, data := range left { result[percentile] = data if _, ok := right[percentile]; !ok { @@ -133,12 +133,12 @@ type resourceGatherWorker struct { containerIDToNameMap map[string]string containerIDs []string stopCh chan struct{} - dataSeries []resourceUsagePerContainer + dataSeries []ResourceUsagePerContainer finished bool } func (w *resourceGatherWorker) singleProbe() { - data := make(resourceUsagePerContainer) + data := make(ResourceUsagePerContainer) nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, 
probeDuration, func() []string { return w.containerIDs }, true) if err != nil { Logf("Error while reading data from %v: %v", w.nodeName, err) @@ -236,7 +236,7 @@ func (g *containerResourceGatherer) startGatheringData() { g.getKubeSystemContainersResourceUsage(g.client) } -func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]resourceConstraint) *ResourceUsageSummary { +func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) *ResourceUsageSummary { close(g.stopCh) Logf("Closed stop channel. Waiting for %v workers", len(g.workers)) finished := make(chan struct{}) @@ -261,7 +261,7 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai Logf("Warning! Empty percentile list for stopAndPrintData.") return &ResourceUsageSummary{} } - data := make(map[int]resourceUsagePerContainer) + data := make(map[int]ResourceUsagePerContainer) for i := range g.workers { if g.workers[i].finished { stats := computePercentiles(g.workers[i].dataSeries, percentiles) @@ -290,23 +290,23 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai // Name has a form: / containerName := strings.Split(name, "/")[1] if constraint, ok := constraints[containerName]; ok { - if usage.CPUUsageInCores > constraint.cpuConstraint { + if usage.CPUUsageInCores > constraint.CPUConstraint { violatedConstraints = append( violatedConstraints, fmt.Sprintf("Container %v is using %v/%v CPU", name, usage.CPUUsageInCores, - constraint.cpuConstraint, + constraint.CPUConstraint, ), ) } - if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint { + if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint { violatedConstraints = append( violatedConstraints, fmt.Sprintf("Container %v is using %v/%v MB of memory", name, float64(usage.MemoryWorkingSetInBytes)/(1024*1024), - float64(constraint.memoryConstraint)/(1024*1024), + float64(constraint.MemoryConstraint)/(1024*1024), ), ) } diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 73c2d1e64fa..54511b6121f 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -98,7 +98,7 @@ func RegisterFlags() { flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.") flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.") - flag.StringVar(&testContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.") + flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. 
Default is empty, which doesn't prepend anything to the default name.") flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.") flag.StringVar(&TestContext.OSDistro, "os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).") diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 8c8c6aa7269..05b0d3287e0 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -80,10 +80,10 @@ import ( const ( // How long to wait for the pod to be listable - podListTimeout = time.Minute + PodListTimeout = time.Minute // Initial pod start can be delayed O(minutes) by slow docker pulls // TODO: Make this 30 seconds once #4566 is resolved. - podStartTimeout = 5 * time.Minute + PodStartTimeout = 5 * time.Minute // How long to wait for the pod to no longer be running podNoLongerRunningTimeout = 30 * time.Second @@ -91,45 +91,45 @@ const ( // If there are any orphaned namespaces to clean up, this test is running // on a long lived cluster. A long wait here is preferably to spurious test // failures caused by leaked resources from a previous test run. - namespaceCleanupTimeout = 15 * time.Minute + NamespaceCleanupTimeout = 15 * time.Minute // Some pods can take much longer to get ready due to volume attach/detach latency. slowPodStartTimeout = 15 * time.Minute // How long to wait for a service endpoint to be resolvable. - serviceStartTimeout = 1 * time.Minute + ServiceStartTimeout = 1 * time.Minute // String used to mark pod deletion nonExist = "NonExist" - // How often to poll pods, nodes and claims. - poll = 2 * time.Second + // How often to Poll pods, nodes and claims. + Poll = 2 * time.Second // service accounts are provisioned after namespace creation // a service account is required to support pod creation in a namespace as part of admission control - serviceAccountProvisionTimeout = 2 * time.Minute + ServiceAccountProvisionTimeout = 2 * time.Minute // How long to try single API calls (like 'get' or 'list'). Used to prevent // transient failures from failing tests. // TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed. - singleCallTimeout = 5 * time.Minute + SingleCallTimeout = 5 * time.Minute // How long nodes have to be "ready" when a test begins. They should already // be "ready" before the test starts, so this is small. - nodeReadyInitialTimeout = 20 * time.Second + NodeReadyInitialTimeout = 20 * time.Second // How long pods have to be "ready" when a test begins. - podReadyBeforeTimeout = 2 * time.Minute + PodReadyBeforeTimeout = 2 * time.Minute // How long pods have to become scheduled onto nodes - podScheduledBeforeTimeout = podListTimeout + (20 * time.Second) + podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second) podRespondingTimeout = 2 * time.Minute - serviceRespondingTimeout = 2 * time.Minute - endpointRegisterTimeout = time.Minute + ServiceRespondingTimeout = 2 * time.Minute + EndpointRegisterTimeout = time.Minute // How long claims have to become dynamically provisioned - claimProvisionTimeout = 5 * time.Minute + ClaimProvisionTimeout = 5 * time.Minute ) // SubResource proxy should have been functional in v1.0.0, but SubResource @@ -138,11 +138,11 @@ const ( // // TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively // in v1.3). 
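Because the poll interval and the timeout constants are now exported, callers outside the package can build their own wait loops on top of them. A hedged sketch, assuming c is a *client.Client, ns is a namespace name, and the pod name is hypothetical:

err := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
	pod, err := c.Pods(ns).Get("example-pod") // hypothetical pod name
	if err != nil {
		return false, err
	}
	// Keep polling until the pod reports Running.
	return pod.Status.Phase == api.PodRunning, nil
})
framework.ExpectNoError(err)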
-var subResourcePodProxyVersion = version.MustParse("v1.1.0") +var SubResourcePodProxyVersion = version.MustParse("v1.1.0") var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0") -func getServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) { - subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) +func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) { + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) if err != nil { return nil, err } @@ -153,7 +153,7 @@ func getServicesProxyRequest(c *client.Client, request *restclient.Request) (*re } // unique identifier of the e2e run -var runId = util.NewUUID() +var RunId = util.NewUUID() type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) @@ -190,10 +190,10 @@ func NewPodStore(c *client.Client, namespace string, label labels.Selector, fiel store := cache.NewStore(cache.MetaNamespaceKeyFunc) stopCh := make(chan struct{}) cache.NewReflector(lw, &api.Pod{}, store, 0).RunUntil(stopCh) - return &podStore{store, stopCh} + return &PodStore{store, stopCh} } -func (s *podStore) List() []*api.Pod { +func (s *PodStore) List() []*api.Pod { objects := s.Store.List() pods := make([]*api.Pod, 0) for _, o := range objects { @@ -202,7 +202,7 @@ func (s *podStore) List() []*api.Pod { return pods } -func (s *podStore) Stop() { +func (s *PodStore) Stop() { close(s.stopCh) } @@ -278,8 +278,8 @@ func Skipf(format string, args ...interface{}) { } func SkipUnlessNodeCountIsAtLeast(minNodeCount int) { - if testContext.CloudConfig.NumNodes < minNodeCount { - Skipf("Requires at least %d nodes (not %d)", minNodeCount, testContext.CloudConfig.NumNodes) + if TestContext.CloudConfig.NumNodes < minNodeCount { + Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes) } } @@ -290,20 +290,20 @@ func SkipUnlessAtLeast(value int, minValue int, message string) { } func SkipIfProviderIs(unsupportedProviders ...string) { - if providerIs(unsupportedProviders...) { - Skipf("Not supported for providers %v (found %s)", unsupportedProviders, testContext.Provider) + if ProviderIs(unsupportedProviders...) { + Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider) } } func SkipUnlessProviderIs(supportedProviders ...string) { - if !providerIs(supportedProviders...) { - Skipf("Only supported for providers %v (not %s)", supportedProviders, testContext.Provider) + if !ProviderIs(supportedProviders...) 
{ + Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider) } } -func providerIs(providers ...string) bool { +func ProviderIs(providers ...string) bool { for _, provider := range providers { - if strings.ToLower(provider) == strings.ToLower(testContext.Provider) { + if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) { return true } } @@ -311,7 +311,7 @@ func providerIs(providers ...string) bool { } func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) { - gte, err := serverVersionGTE(v, c) + gte, err := ServerVersionGTE(v, c) if err != nil { Failf("Failed to get server version: %v", err) } @@ -320,8 +320,8 @@ func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInter } } -// providersWithSSH are those providers where each node is accessible with SSH -var providersWithSSH = []string{"gce", "gke", "aws"} +// ProvidersWithSSH are those providers where each node is accessible with SSH +var ProvidersWithSSH = []string{"gce", "gke", "aws"} // providersWithMasterSSH are those providers where master node is accessible with SSH var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"} @@ -374,9 +374,9 @@ func logPodStates(pods []api.Pod) { Logf("") // Final empty line helps for readability. } -// podRunningReady checks whether pod p's phase is running and it has a ready +// PodRunningReady checks whether pod p's phase is running and it has a ready // condition of status true. -func podRunningReady(p *api.Pod) (bool, error) { +func PodRunningReady(p *api.Pod) (bool, error) { // Check the phase is running. if p.Status.Phase != api.PodRunning { return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'", @@ -390,8 +390,8 @@ func podRunningReady(p *api.Pod) (bool, error) { return true, nil } -// podNotReady checks whether pod p's has a ready condition of status false. -func podNotReady(p *api.Pod) (bool, error) { +// PodNotReady checks whether pod p's has a ready condition of status false. +func PodNotReady(p *api.Pod) (bool, error) { // Check the ready condition is false. if podReady(p) { return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v", @@ -411,22 +411,22 @@ func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api return false } -// waitForPodsRunningReady waits up to timeout to ensure that all pods in +// WaitForPodsRunningReady waits up to timeout to ensure that all pods in // namespace ns are either running and ready, or failed but controlled by a // replication controller. Also, it ensures that at least minPods are running // and ready. It has separate behavior from other 'wait for' pods functions in // that it requires the list of pods on every iteration. This is useful, for // example, in cluster startup, because the number of pods increases while // waiting. 
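A hedged sketch of the exported gating and readiness helpers in a test body; the node count is arbitrary and pod is assumed to be an *api.Pod fetched earlier:

// Skip early if the cluster is too small for this scenario.
framework.SkipUnlessNodeCountIsAtLeast(2)

// PodRunningReady reports whether the pod is Running with a Ready condition of true.
if ok, err := framework.PodRunningReady(pod); err != nil || !ok {
	framework.Failf("pod %q is not running and ready: %v", pod.Name, err)
}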
-func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) error { - c, err := loadClient() +func WaitForPodsRunningReady(ns string, minPods int, timeout time.Duration) error { + c, err := LoadClient() if err != nil { return err } start := time.Now() Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", timeout, minPods, ns) - if wait.PollImmediate(poll, timeout, func() (bool, error) { + if wait.PollImmediate(Poll, timeout, func() (bool, error) { // We get the new list of pods and replication controllers in every // iteration because more pods come online during startup and we want to // ensure they are also checked. @@ -447,7 +447,7 @@ func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro } nOk, replicaOk, badPods := 0, 0, []api.Pod{} for _, pod := range podList.Items { - if res, err := podRunningReady(&pod); res && err == nil { + if res, err := PodRunningReady(&pod); res && err == nil { nOk++ if hasReplicationControllersForPod(rcList, pod) { replicaOk++ @@ -498,8 +498,8 @@ func podFromManifest(filename string) (*api.Pod, error) { // Run a test container to try and contact the Kubernetes api-server from a pod, wait for it // to flip to Ready, log its output and delete it. -func runKubernetesServiceTestContainer(repoRoot string, ns string) { - c, err := loadClient() +func RunKubernetesServiceTestContainer(repoRoot string, ns string) { + c, err := LoadClient() if err != nil { Logf("Failed to load client") return @@ -521,11 +521,11 @@ func runKubernetesServiceTestContainer(repoRoot string, ns string) { } }() timeout := 5 * time.Minute - if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, podRunningReady); err != nil { + if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, PodRunningReady); err != nil { Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) return } - logs, err := getPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) + logs, err := GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) if err != nil { Logf("Failed to retrieve logs from %v: %v", p.Name, err) } else { @@ -533,8 +533,8 @@ func runKubernetesServiceTestContainer(repoRoot string, ns string) { } } -func logFailedContainers(ns string) { - c, err := loadClient() +func LogFailedContainers(ns string) { + c, err := LoadClient() if err != nil { Logf("Failed to load client") return @@ -546,9 +546,9 @@ func logFailedContainers(ns string) { } Logf("Running kubectl logs on non-ready containers in %v", ns) for _, pod := range podList.Items { - if res, err := podRunningReady(&pod); !res || err != nil { + if res, err := PodRunningReady(&pod); !res || err != nil { for _, container := range pod.Spec.Containers { - logs, err := getPodLogs(c, ns, pod.Name, container.Name) + logs, err := GetPodLogs(c, ns, pod.Name, container.Name) if err != nil { logs, err = getPreviousPodLogs(c, ns, pod.Name, container.Name) if err != nil { @@ -562,10 +562,10 @@ func logFailedContainers(ns string) { } } -// deleteNamespaces deletes all namespaces that match the given delete and skip filters. +// DeleteNamespaces deletes all namespaces that match the given delete and skip filters. // Filter is by simple strings.Contains; first skip filter, then delete filter. // Returns the list of deleted namespaces or an error. 
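For example (illustrative only, with a made-up pod count and timeout), a suite-level gate that uses the exported readiness and logging helpers together:

if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, 3, 10*time.Minute); err != nil {
	// Dump logs of the non-ready containers before failing, to aid debugging.
	framework.LogFailedContainers(api.NamespaceSystem)
	framework.Failf("error waiting for system pods to be running and ready: %v", err)
}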
-func deleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) { +func DeleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") nsList, err := c.Namespaces().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -605,7 +605,7 @@ OUTER: return deleted, nil } -func waitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error { +func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error { By("Waiting for namespaces to vanish") nsMap := map[string]bool{} for _, ns := range namespaces { @@ -629,10 +629,10 @@ func waitForNamespacesDeleted(c *client.Client, namespaces []string, timeout tim func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error { Logf("Waiting up to %v for service account %s to be provisioned in ns %s", timeout, serviceAccountName, ns) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { sa, err := c.ServiceAccounts(ns).Get(serviceAccountName) if apierrs.IsNotFound(err) { - Logf("Get service account %s in ns %s failed, ignoring for %v: %v", serviceAccountName, ns, poll, err) + Logf("Get service account %s in ns %s failed, ignoring for %v: %v", serviceAccountName, ns, Poll, err) continue } if err != nil { @@ -640,7 +640,7 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName s return err } if len(sa.Secrets) == 0 { - Logf("Service account %s in ns %s had 0 secrets, ignoring for %v: %v", serviceAccountName, ns, poll, err) + Logf("Service account %s in ns %s had 0 secrets, ignoring for %v: %v", serviceAccountName, ns, Poll, err) continue } Logf("Service account %s in ns %s with secrets found. (%v)", serviceAccountName, ns, time.Since(start)) @@ -651,7 +651,7 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName s func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pod, err := c.Pods(ns).Get(podName) if err != nil { if apierrs.IsNotFound(err) { @@ -660,7 +660,7 @@ func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout tim } // Aligning this text makes it much more readable Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v", - podName, ns, poll, err) + podName, ns, Poll, err) continue } done, err := condition(pod) @@ -674,11 +674,11 @@ func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout tim return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout) } -// waitForMatchPodsCondition finds match pods based on the input ListOptions. +// WaitForMatchPodsCondition finds match pods based on the input ListOptions. 
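A sketch of how a suite teardown could use these exported namespace helpers; the filters are hypothetical and c is an assumed *client.Client:

// Delete leftover e2e namespaces, but never the cluster's own namespaces.
deleted, err := framework.DeleteNamespaces(c, []string{"e2e-tests-"}, []string{"kube-system", "default"})
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout))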
// waits and checks if all match pods are in the given podCondition -func waitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error { +func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pods, err := c.Pods(api.NamespaceAll).List(opts) if err != nil { return err @@ -701,20 +701,20 @@ func waitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc stri return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout) } -// waitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned +// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned -func waitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error { - return waitForServiceAccountInNamespace(c, namespace, "default", serviceAccountProvisionTimeout) +func WaitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error { + return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout) } -// waitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. -func waitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, poll, timeout time.Duration) error { +// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. +func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.PersistentVolumes().Get(pvName) if err != nil { - Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err) + Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue } else { if pv.Status.Phase == phase { @@ -728,10 +728,10 @@ func waitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Cli return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout) } -// waitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. -func waitForPersistentVolumeDeleted(c *client.Client, pvName string, poll, timeout time.Duration) error { +// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. 
+func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.PersistentVolumes().Get(pvName) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) @@ -741,20 +741,20 @@ func waitForPersistentVolumeDeleted(c *client.Client, pvName string, poll, timeo Logf("PersistentVolume %s was removed", pvName) return nil } else { - Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err) + Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) } } } return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout) } -// waitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. -func waitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, poll, timeout time.Duration) error { +// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. +func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName) if err != nil { - Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, poll, err) + Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err) continue } else { if pvc.Status.Phase == phase { @@ -774,7 +774,7 @@ func CreateTestingNS(baseName string, c *client.Client, labels map[string]string if labels == nil { labels = map[string]string{} } - labels["e2e-run"] = string(runId) + labels["e2e-run"] = string(RunId) namespaceObj := &api.Namespace{ ObjectMeta: api.ObjectMeta{ @@ -786,7 +786,7 @@ func CreateTestingNS(baseName string, c *client.Client, labels map[string]string } // Be robust about making the namespace creation call. var got *api.Namespace - if err := wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { + if err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { var err error got, err = c.Namespaces().Create(namespaceObj) if err != nil { @@ -797,17 +797,17 @@ func CreateTestingNS(baseName string, c *client.Client, labels map[string]string return nil, err } - if testContext.VerifyServiceAccount { - if err := waitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { + if TestContext.VerifyServiceAccount { + if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { return nil, err } } return got, nil } -// checkTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state +// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state // and waits until they are finally deleted. It ignores namespace skip. 
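Illustrative use of the exported PV/PVC helpers and timeouts, assuming c, ns and a hypothetical claim name:

// Wait for the claim to bind, polling at the framework's default interval.
err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, "example-claim", framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)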
-func checkTestingNSDeletedExcept(c *client.Client, skip string) error { +func CheckTestingNSDeletedExcept(c *client.Client, skip string) error { // TODO: Since we don't have support for bulk resource deletion in the API, // while deleting a namespace we are deleting all objects from that namespace // one by one (one deletion == one API call). This basically exposes us to @@ -889,10 +889,10 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error { return nil } -// Waits default amount of time (podStartTimeout) for the specified pod to become running. +// Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. -func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error { - return waitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout) +func WaitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error { + return waitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout) } // Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. @@ -916,7 +916,7 @@ func waitTimeoutForPodRunningInNamespace(c *client.Client, podName string, names // Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. -func waitForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string) error { +func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string) error { return waitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout) } @@ -943,9 +943,9 @@ func waitTimeoutForPodReadyInNamespace(c *client.Client, podName string, namespa }) } -// waitForPodNotPending returns an error if it took too long for the pod to go out of pending state. -func waitForPodNotPending(c *client.Client, ns, podName string) error { - return waitForPodCondition(c, ns, podName, "!pending", podStartTimeout, func(pod *api.Pod) (bool, error) { +// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. +func WaitForPodNotPending(c *client.Client, ns, podName string) error { + return waitForPodCondition(c, ns, podName, "!pending", PodStartTimeout, func(pod *api.Pod) (bool, error) { if pod.Status.Phase != api.PodPending { Logf("Saw pod '%s' in namespace '%s' out of pending state (found '%q')", podName, ns, pod.Status.Phase) return true, nil @@ -957,7 +957,7 @@ func waitForPodNotPending(c *client.Client, ns, podName string) error { // waitForPodTerminatedInNamespace returns an error if it took too long for the pod // to terminate or if the pod terminated with an unexpected reason. 
func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error { - return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *api.Pod) (bool, error) { + return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) { if pod.Status.Phase == api.PodFailed { if pod.Status.Reason == reason { return true, nil @@ -991,13 +991,13 @@ func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, contN }) } -// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. -func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error { - return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, podStartTimeout) +// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. +func WaitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error { + return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, PodStartTimeout) } -// waitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. -func waitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error { +// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. +func WaitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, slowPodStartTimeout) } @@ -1025,7 +1025,7 @@ func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, er return p, err } -func waitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { +func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) options := api.ListOptions{LabelSelector: label} @@ -1048,18 +1048,18 @@ func waitForPodToDisappear(c *client.Client, ns, podName string, label labels.Se }) } -// waitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists. +// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists. // In case of failure or too long waiting time, an error is returned. -func waitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error { +func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) // NodeController evicts pod after 5 minutes, so we need timeout greater than that. // Additionally, there can be non-zero grace period, so we are setting 10 minutes // to be on the safe size. 
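A hedged sketch of WaitForPodToDisappear called from another package; the controller name, pod name and intervals are invented, and the labels and time imports are assumed:

label := labels.SelectorFromSet(labels.Set{"name": "example-rc"})
err := framework.WaitForPodToDisappear(c, ns, "example-rc-abcde", label, 2*time.Second, 5*time.Minute)
framework.ExpectNoError(err)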
- return waitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute) + return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute) } -// waitForService waits until the service appears (exist == true), or disappears (exist == false) -func waitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { +// WaitForService waits until the service appears (exist == true), or disappears (exist == false) +func WaitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { _, err := c.Services(namespace).Get(name) switch { @@ -1087,8 +1087,8 @@ func waitForService(c *client.Client, namespace, name string, exist bool, interv return nil } -//waitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. -func waitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { +//WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. +func WaitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to %d", serviceName, expectNum) list, err := c.Endpoints(namespace).List(api.ListOptions{}) @@ -1113,8 +1113,8 @@ func countEndpointsNum(e *api.Endpoints) int { return num } -// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) -func waitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { +// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) +func WaitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { _, err := c.ReplicationControllers(namespace).Get(name) if err != nil { @@ -1132,8 +1132,8 @@ func waitForReplicationController(c *client.Client, namespace, name string, exis return nil } -func waitForEndpoint(c *client.Client, ns, name string) error { - for t := time.Now(); time.Since(t) < endpointRegisterTimeout; time.Sleep(poll) { +func WaitForEndpoint(c *client.Client, ns, name string) error { + for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) { endpoint, err := c.Endpoints(ns).Get(name) Expect(err).NotTo(HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { @@ -1157,9 +1157,13 @@ type podProxyResponseChecker struct { pods *api.PodList } -// checkAllResponses issues GETs to all pods in the context and verify they +func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker { + return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods} +} + +// CheckAllResponses issues GETs to all pods in the context and verify they // reply with their own pod name. 
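// Usage sketch (illustrative only, not part of this diff): polling for a Service and its
// endpoints from a test via the exported helpers. The service name and the intervals and
// timeouts are placeholders; imports are as in the sketch above, plus "time".
func waitForBackedService(c *client.Client, ns string) error {
	// First wait for the Service object itself to exist...
	if err := framework.WaitForService(c, ns, "redis-master", true, 2*time.Second, 1*time.Minute); err != nil {
		return err
	}
	// ...then for it to be backed by the expected number of endpoint addresses.
	return framework.WaitForServiceEndpointsNum(c, ns, "redis-master", 1, 2*time.Second, 2*time.Minute)
}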
-func (r podProxyResponseChecker) checkAllResponses() (done bool, err error) { +func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := api.ListOptions{LabelSelector: r.label} currentPods, err := r.c.Pods(r.ns).List(options) @@ -1169,7 +1173,7 @@ func (r podProxyResponseChecker) checkAllResponses() (done bool, err error) { if !isElementOf(pod.UID, currentPods) { return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) } - subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, r.c) + subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c) if err != nil { return false, err } @@ -1225,11 +1229,11 @@ func (r podProxyResponseChecker) checkAllResponses() (done bool, err error) { return true, nil } -// serverVersionGTE returns true if v is greater than or equal to the server +// ServerVersionGTE returns true if v is greater than or equal to the server // version. // // TODO(18726): This should be incorporated into client.VersionInterface. -func serverVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) { +func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) { serverVersion, err := c.ServerVersion() if err != nil { return false, fmt.Errorf("Unable to get server version: %v", err) @@ -1241,13 +1245,13 @@ func serverVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (boo return sv.GTE(v), nil } -func podsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error { +func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error { By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - return wait.PollImmediate(poll, podRespondingTimeout, podProxyResponseChecker{c, ns, label, name, wantName, pods}.checkAllResponses) + return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } -func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) { +func PodsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) { timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) @@ -1282,7 +1286,7 @@ func podsRunning(c *client.Client, pods *api.PodList) []error { e := []error{} for _, pod := range pods.Items { // TODO: make waiting parallel. 
- err := waitForPodRunningInNamespace(c, pod.Name, pod.Namespace) + err := WaitForPodRunningInNamespace(c, pod.Name, pod.Namespace) if err != nil { e = append(e, err) } @@ -1290,8 +1294,8 @@ func podsRunning(c *client.Client, pods *api.PodList) []error { return e } -func verifyPods(c *client.Client, ns, name string, wantName bool, replicas int) error { - pods, err := podsCreated(c, ns, name, replicas) +func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int) error { + pods, err := PodsCreated(c, ns, name, replicas) if err != nil { return err } @@ -1299,18 +1303,18 @@ func verifyPods(c *client.Client, ns, name string, wantName bool, replicas int) if len(e) > 0 { return fmt.Errorf("failed to wait for pods running: %v", e) } - err = podsResponding(c, ns, name, wantName, pods) + err = PodsResponding(c, ns, name, wantName, pods) if err != nil { return fmt.Errorf("failed to wait for pods responding: %v", err) } return nil } -func serviceResponding(c *client.Client, ns, name string) error { +func ServiceResponding(c *client.Client, ns, name string) error { By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) - return wait.PollImmediate(poll, serviceRespondingTimeout, func() (done bool, err error) { - proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) + return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { + proxyRequest, errProxy := GetServicesProxyRequest(c, c.Get()) if errProxy != nil { Logf("Failed to get services proxy request: %v:", errProxy) return false, nil @@ -1333,19 +1337,19 @@ func serviceResponding(c *client.Client, ns, name string) error { }) } -func loadConfig() (*restclient.Config, error) { +func LoadConfig() (*restclient.Config, error) { switch { - case testContext.KubeConfig != "": - Logf(">>> testContext.KubeConfig: %s\n", testContext.KubeConfig) - c, err := clientcmd.LoadFromFile(testContext.KubeConfig) + case TestContext.KubeConfig != "": + Logf(">>> TestContext.KubeConfig: %s\n", TestContext.KubeConfig) + c, err := clientcmd.LoadFromFile(TestContext.KubeConfig) if err != nil { return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error()) } - if testContext.KubeContext != "" { - Logf(">>> testContext.KubeContext: %s\n", testContext.KubeContext) - c.CurrentContext = testContext.KubeContext + if TestContext.KubeContext != "" { + Logf(">>> TestContext.KubeContext: %s\n", TestContext.KubeContext) + c.CurrentContext = TestContext.KubeContext } - return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: testContext.Host}}).ClientConfig() + return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() default: return nil, fmt.Errorf("KubeConfig must be specified to load client config") } @@ -1357,13 +1361,13 @@ func loadClientFromConfig(config *restclient.Config) (*client.Client, error) { return nil, fmt.Errorf("error creating client: %v", err.Error()) } if c.Client.Timeout == 0 { - c.Client.Timeout = singleCallTimeout + c.Client.Timeout = SingleCallTimeout } return c, nil } -func loadClient() (*client.Client, error) { - config, err := loadConfig() +func LoadClient() (*client.Client, error) { + config, err := LoadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } @@ -1380,7 +1384,7 @@ func randomSuffix() string { return strconv.Itoa(r.Int() % 10000) } -func expectNoError(err error, explain ...interface{}) { 
+func ExpectNoError(err error, explain ...interface{}) { if err != nil { Logf("Unexpected error occurred: %v", err) } @@ -1388,20 +1392,20 @@ func expectNoError(err error, explain ...interface{}) { } // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. -func cleanup(filePath string, ns string, selectors ...string) { +func Cleanup(filePath string, ns string, selectors ...string) { By("using delete to clean up resources") var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } - runKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg) + RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg) for _, selector := range selectors { - resources := runKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg) + resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg) if resources != "" { Failf("Resources left running after stop:\n%s", resources) } - pods := runKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") + pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { Failf("Pods left unterminated after stop:\n%s", pods) } @@ -1412,13 +1416,13 @@ func cleanup(filePath string, ns string, selectors ...string) { // we may want it to return more than just an error, at some point. type validatorFn func(c *client.Client, podID string) error -// validateController is a generic mechanism for testing RC's that are running. +// ValidateController is a generic mechanism for testing RC's that are running. // It takes a container name, a test name, and a validator function which is plugged in by a specific test. // "containername": this is grepped for. // "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated. // "testname": which gets bubbled up to the logging/failure messages if errors happen. // "validator" function: This function is given a podID and a client, and it can do some specific validations that way. -func validateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) { +func ValidateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) { getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}" // NB: kubectl adds the "exists" function to the standard template functions. 
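// Usage sketch (illustrative only, not part of this diff): bootstrapping a client from the
// shared TestContext and cleaning up manifest-created resources. The manifest path, label
// selector and RC name are placeholders; "fmt" is assumed imported.
func createAndCleanupGuestbookMaster(ns string) {
	c, err := framework.LoadClient() // built from TestContext.KubeConfig / TestContext.Host
	framework.ExpectNoError(err, "failed to load client")

	framework.RunKubectlOrDie("create", "-f", "examples/guestbook/redis-master-controller.yaml",
		fmt.Sprintf("--namespace=%v", ns))
	defer framework.Cleanup("examples/guestbook/redis-master-controller.yaml", ns, "app=redis")

	framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, "redis-master"))
}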
// This lets us check to see if the "running" entry exists for each of the containers @@ -1434,8 +1438,8 @@ func validateController(c *client.Client, containerImage string, replicas int, c By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector waitLoop: - for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { - getPodsOutput := runKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "--api-version=v1", "-l", testname, fmt.Sprintf("--namespace=%v", ns)) + for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) { + getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "--api-version=v1", "-l", testname, fmt.Sprintf("--namespace=%v", ns)) pods := strings.Fields(getPodsOutput) if numPods := len(pods); numPods != replicas { By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods)) @@ -1443,13 +1447,13 @@ waitLoop: } var runningPods []string for _, podID := range pods { - running := runKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns)) + running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns)) if running != "true" { Logf("%s is created but not running", podID) continue waitLoop } - currentImage := runKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns)) + currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, "--api-version=v1", fmt.Sprintf("--namespace=%v", ns)) if currentImage != containerImage { Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) continue waitLoop @@ -1471,38 +1475,38 @@ waitLoop: } } // Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken. - Failf("Timed out after %v seconds waiting for %s pods to reach valid state", podStartTimeout.Seconds(), testname) + Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname) } -// kubectlCmd runs the kubectl executable through the wrapper script. -func kubectlCmd(args ...string) *exec.Cmd { +// KubectlCmd runs the kubectl executable through the wrapper script. +func KubectlCmd(args ...string) *exec.Cmd { defaultArgs := []string{} // Reference a --server option so tests can run anywhere. 
- if testContext.Host != "" { - defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+testContext.Host) + if TestContext.Host != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host) } - if testContext.KubeConfig != "" { - defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+testContext.KubeConfig) + if TestContext.KubeConfig != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig) // Reference the KubeContext - if testContext.KubeContext != "" { - defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+testContext.KubeContext) + if TestContext.KubeContext != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext) } } else { - if testContext.CertDir != "" { + if TestContext.CertDir != "" { defaultArgs = append(defaultArgs, - fmt.Sprintf("--certificate-authority=%s", filepath.Join(testContext.CertDir, "ca.crt")), - fmt.Sprintf("--client-certificate=%s", filepath.Join(testContext.CertDir, "kubecfg.crt")), - fmt.Sprintf("--client-key=%s", filepath.Join(testContext.CertDir, "kubecfg.key"))) + fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")), + fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")), + fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key"))) } } kubectlArgs := append(defaultArgs, args...) //We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" //and so on. - cmd := exec.Command(testContext.KubectlPath, kubectlArgs...) + cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...) //caller will invoke this and wait on it. return cmd @@ -1515,35 +1519,35 @@ type kubectlBuilder struct { timeout <-chan time.Time } -func newKubectlCommand(args ...string) *kubectlBuilder { +func NewKubectlCommand(args ...string) *kubectlBuilder { b := new(kubectlBuilder) - b.cmd = kubectlCmd(args...) + b.cmd = KubectlCmd(args...) 
return b } -func (b *kubectlBuilder) withTimeout(t <-chan time.Time) *kubectlBuilder { +func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder { b.timeout = t return b } -func (b kubectlBuilder) withStdinData(data string) *kubectlBuilder { +func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder { b.cmd.Stdin = strings.NewReader(data) return &b } -func (b kubectlBuilder) withStdinReader(reader io.Reader) *kubectlBuilder { +func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder { b.cmd.Stdin = reader return &b } -func (b kubectlBuilder) execOrDie() string { - str, err := b.exec() +func (b kubectlBuilder) ExecOrDie() string { + str, err := b.Exec() Logf("stdout: %q", str) Expect(err).NotTo(HaveOccurred()) return str } -func (b kubectlBuilder) exec() (string, error) { +func (b kubectlBuilder) Exec() (string, error) { var stdout, stderr bytes.Buffer cmd := b.cmd cmd.Stdout, cmd.Stderr = &stdout, &stderr @@ -1570,22 +1574,22 @@ func (b kubectlBuilder) exec() (string, error) { return strings.TrimSpace(stdout.String()), nil } -// runKubectlOrDie is a convenience wrapper over kubectlBuilder -func runKubectlOrDie(args ...string) string { - return newKubectlCommand(args...).execOrDie() +// RunKubectlOrDie is a convenience wrapper over kubectlBuilder +func RunKubectlOrDie(args ...string) string { + return NewKubectlCommand(args...).ExecOrDie() } -// runKubectl is a convenience wrapper over kubectlBuilder -func runKubectl(args ...string) (string, error) { - return newKubectlCommand(args...).exec() +// RunKubectl is a convenience wrapper over kubectlBuilder +func RunKubectl(args ...string) (string, error) { + return NewKubectlCommand(args...).Exec() } // runKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin func runKubectlOrDieInput(data string, args ...string) string { - return newKubectlCommand(args...).withStdinData(data).execOrDie() + return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie() } -func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { +func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { stdout, err = cmd.StdoutPipe() if err != nil { return @@ -1600,16 +1604,16 @@ func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err e } // Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer. -func tryKill(cmd *exec.Cmd) { +func TryKill(cmd *exec.Cmd) { if err := cmd.Process.Kill(); err != nil { Logf("ERROR failed to kill command %v! The process may leak", cmd) } } -// testContainerOutput runs the given pod in the given namespace and waits +// TestContainerOutput runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using a substring matcher. -func testContainerOutput(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) { +func TestContainerOutput(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) { testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, ContainSubstring) } @@ -1639,7 +1643,7 @@ func testContainerOutputMatcher(scenarioName string, // Wait for client pod to complete. 
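// Usage sketch (illustrative only, not part of this diff): the exported kubectl wrappers.
// RunKubectlOrDie/RunKubectl cover the one-shot case, while the builder lets callers attach
// stdin or a timeout. The pod name and 30s timeout are placeholders; "fmt" and "time" are
// assumed imported.
func kubectlSketch(ns string) {
	out := framework.RunKubectlOrDie("get", "pods", fmt.Sprintf("--namespace=%v", ns))
	framework.Logf("pods:\n%s", out)

	// Builder form: bound a potentially slow call with an explicit timeout.
	logs := framework.NewKubectlCommand("logs", "webserver-sketch", fmt.Sprintf("--namespace=%v", ns)).
		WithTimeout(time.After(30 * time.Second)).
		ExecOrDie()
	framework.Logf("container logs:\n%s", logs)
}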
var containerName string for id, container := range pod.Spec.Containers { - expectNoError(waitForPodSuccessInNamespace(c, pod.Name, container.Name, ns)) + ExpectNoError(WaitForPodSuccessInNamespace(c, pod.Name, container.Name, ns)) if id == containerIndex { containerName = container.Name } @@ -1662,7 +1666,7 @@ func testContainerOutputMatcher(scenarioName string, // Sometimes the actual containers take a second to get started, try to get logs for 60s for time.Now().Sub(start) < (60 * time.Second) { err = nil - logs, err = getPodLogs(c, ns, pod.Name, containerName) + logs, err = GetPodLogs(c, ns, pod.Name, containerName) if err != nil { By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err)) @@ -1747,7 +1751,7 @@ func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff { // RunDeployment Launches (and verifies correctness) of a Deployment // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the -// namespace lifecycle for handling cleanup). +// namespace lifecycle for handling Cleanup). func RunDeployment(config DeploymentConfig) error { err := config.create() if err != nil { @@ -1800,7 +1804,7 @@ func (config *DeploymentConfig) create() error { // RunReplicaSet launches (and verifies correctness) of a ReplicaSet // and waits until all the pods it launches to reach the "Running" state. // It's the caller's responsibility to clean up externally (i.e. use the -// namespace lifecycle for handling cleanup). +// namespace lifecycle for handling Cleanup). func RunReplicaSet(config ReplicaSetConfig) error { err := config.create() if err != nil { @@ -1853,7 +1857,7 @@ func (config *ReplicaSetConfig) create() error { // RunRC Launches (and verifies correctness) of a Replication Controller // and will wait for all pods it spawns to become "Running". // It's the caller's responsibility to clean up externally (i.e. use the -// namespace lifecycle for handling cleanup). +// namespace lifecycle for handling Cleanup). 
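// Usage sketch (illustrative only, not part of this diff): driving RunRC from a test.
// Client, Name, Namespace and Replicas are the RCConfig fields visible in this diff; the
// Image field is assumed to exist and its value here is a placeholder.
func runSmallRC(c *client.Client, ns string) error {
	cfg := framework.RCConfig{
		Client:    c,
		Image:     "gcr.io/google_containers/pause:2.0", // assumed field and placeholder image
		Name:      "small-rc",
		Namespace: ns,
		Replicas:  3,
	}
	// RunRC creates the RC and blocks until all replicas are Running; cleanup is left to
	// the namespace lifecycle, as the comment above describes.
	return framework.RunRC(cfg)
}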
func RunRC(config RCConfig) error { err := config.create() if err != nil { @@ -1964,8 +1968,8 @@ func (config *RCConfig) start() error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) - podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything()) - defer podStore.Stop() + PodStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything()) + defer PodStore.Stop() interval := config.PollInterval if interval <= 0 { @@ -1992,7 +1996,7 @@ func (config *RCConfig) start() error { failedContainers := 0 containerRestartNodes := sets.NewString() - pods := podStore.List() + pods := PodStore.List() created := []*api.Pod{} for _, p := range pods { if p.DeletionTimestamp != nil { @@ -2015,7 +2019,7 @@ func (config *RCConfig) start() error { runningButNotReady++ } for _, v := range FailedContainers(p) { - failedContainers = failedContainers + v.restarts + failedContainers = failedContainers + v.Restarts containerRestartNodes.Insert(p.Spec.NodeName) } } else if p.Status.Phase == api.PodPending { @@ -2045,9 +2049,9 @@ func (config *RCConfig) start() error { } if failedContainers > maxContainerFailures { - dumpNodeDebugInfo(config.Client, containerRestartNodes.List()) + DumpNodeDebugInfo(config.Client, containerRestartNodes.List()) // Get the logs from the failed containers to help diagnose what caused them to fail - logFailedContainers(config.Namespace) + LogFailedContainers(config.Namespace) return fmt.Errorf("%d containers failed which is more than allowed %d", failedContainers, maxContainerFailures) } if len(pods) < len(oldPods) || len(pods) > config.Replicas { @@ -2092,7 +2096,7 @@ func (config *RCConfig) start() error { // Simplified version of RunRC, that does not create RC, but creates plain Pods. 
// optionally waits for pods to start running (if waitForRunning == true) -func startPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) { +func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) { startPodsID := string(util.NewUUID()) // So that we can label and find them for i := 0; i < replicas; i++ { podName := fmt.Sprintf("%v-%v", podNamePrefix, i) @@ -2101,13 +2105,13 @@ func startPods(c *client.Client, replicas int, namespace string, podNamePrefix s pod.ObjectMeta.Labels["startPodsID"] = startPodsID pod.Spec.Containers[0].Name = podName _, err := c.Pods(namespace).Create(&pod) - expectNoError(err) + ExpectNoError(err) } Logf("Waiting for running...") if waitForRunning { label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID})) - err := waitForPodsWithLabelRunning(c, namespace, label) - expectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas) + err := WaitForPodsWithLabelRunning(c, namespace, label) + ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas) } } @@ -2123,10 +2127,10 @@ func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) { } } } - dumpNodeDebugInfo(c, badNodes.List()) + DumpNodeDebugInfo(c, badNodes.List()) } -func dumpAllNamespaceInfo(c *client.Client, namespace string) { +func DumpAllNamespaceInfo(c *client.Client, namespace string) { By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) events, err := c.Events(namespace).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -2139,9 +2143,9 @@ func dumpAllNamespaceInfo(c *client.Client, namespace string) { for _, e := range sortedEvents { Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message) } - // Note that we don't wait for any cleanup to propagate, which means + // Note that we don't wait for any Cleanup to propagate, which means // that if you delete a bunch of pods right before ending your test, - // you may or may not see the killing/deletion/cleanup events. + // you may or may not see the killing/deletion/Cleanup events. 
dumpAllPodInfo(c) @@ -2180,10 +2184,10 @@ func dumpAllNodeInfo(c *client.Client) { for ix := range nodes.Items { names[ix] = nodes.Items[ix].Name } - dumpNodeDebugInfo(c, names) + DumpNodeDebugInfo(c, names) } -func dumpNodeDebugInfo(c *client.Client, nodeNames []string) { +func DumpNodeDebugInfo(c *client.Client, nodeNames []string) { for _, n := range nodeNames { Logf("\nLogging node info for node %v", n) node, err := c.Nodes().Get(n) @@ -2238,13 +2242,13 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event { func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList { var nodes *api.NodeList var err error - if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { + if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) return err == nil, nil }) != nil { - expectNoError(err, "Timed out while listing nodes for e2e cluster.") + ExpectNoError(err, "Timed out while listing nodes for e2e cluster.") } return nodes } @@ -2263,14 +2267,14 @@ func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error { if !wait { return nil } - return waitForRCPodsRunning(c, ns, name) + return WaitForRCPodsRunning(c, ns, name) } // Wait up to 10 minutes for pods to become Running. Assume that the pods of the // rc are labels with {"name":rcName}. -func waitForRCPodsRunning(c *client.Client, ns, rcName string) error { +func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error { selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) - err := waitForPodsWithLabelRunning(c, ns, selector) + err := WaitForPodsWithLabelRunning(c, ns, selector) if err != nil { return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err) } @@ -2279,13 +2283,13 @@ func waitForRCPodsRunning(c *client.Client, ns, rcName string) error { // Wait up to 10 minutes for all matching pods to become Running and at least one // matching pod exists. -func waitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error { +func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error { running := false - podStore := newPodStore(c, ns, label, fields.Everything()) - defer podStore.Stop() + PodStore := NewPodStore(c, ns, label, fields.Everything()) + defer PodStore.Stop() waitLoop: for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) { - pods := podStore.List() + pods := PodStore.List() if len(pods) == 0 { continue waitLoop } @@ -2305,9 +2309,9 @@ waitLoop: // Returns true if all the specified pods are scheduled, else returns false. func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) { - podStore := newPodStore(c, ns, label, fields.Everything()) - defer podStore.Stop() - pods := podStore.List() + PodStore := NewPodStore(c, ns, label, fields.Everything()) + defer PodStore.Stop() + pods := PodStore.List() if len(pods) == 0 { return false, nil } @@ -2321,10 +2325,10 @@ func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) // Wait for all matching pods to become scheduled and at least one // matching pod exists. Return the list of matching pods. 
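// Usage sketch (illustrative only, not part of this diff): scaling an RC and waiting for
// its pods, dumping namespace state on failure. The replica count is a placeholder;
// "k8s.io/kubernetes/pkg/labels" is assumed imported.
func scaleAndVerifyRC(c *client.Client, ns, rcName string) {
	// Scale to 5 replicas and block until the RC's pods are Running (wait == true).
	framework.ExpectNoError(framework.ScaleRC(c, ns, rcName, 5, true))

	// Lower-level form, useful when pods are selected by an arbitrary label.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	if err := framework.WaitForPodsWithLabelRunning(c, ns, selector); err != nil {
		framework.DumpAllNamespaceInfo(c, ns)
		framework.Failf("pods for %q never became running: %v", rcName, err)
	}
}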
-func waitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { - err = wait.PollImmediate(poll, podScheduledBeforeTimeout, +func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { + err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, func() (bool, error) { - pods, err = waitForPodsWithLabel(c, ns, label) + pods, err = WaitForPodsWithLabel(c, ns, label) if err != nil { return false, err } @@ -2338,9 +2342,9 @@ func waitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Sel return pods, err } -// Wait up to podListTimeout for getting pods with certain label -func waitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { - for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { +// Wait up to PodListTimeout for getting pods with certain label +func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { + for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { options := api.ListOptions{LabelSelector: label} pods, err = c.Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) @@ -2397,11 +2401,11 @@ func DeleteRC(c *client.Client, ns, name string) error { // have completed termination). func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error { labels := labels.SelectorFromSet(rc.Spec.Selector) - podStore := newPodStore(c, rc.Namespace, labels, fields.Everything()) - defer podStore.Stop() + PodStore := NewPodStore(c, rc.Namespace, labels, fields.Everything()) + defer PodStore.Stop() - return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) { - if pods := podStore.List(); len(pods) == 0 { + return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { + if pods := PodStore.List(); len(pods) == 0 { return true, nil } return false, nil @@ -2446,9 +2450,9 @@ func DeleteReplicaSet(c *client.Client, ns, name string) error { // waitForReplicaSetPodsGone waits until there are no pods reported under a // ReplicaSet selector (because the pods have completed termination). func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error { - return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) { + return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - expectNoError(err) + ExpectNoError(err) options := api.ListOptions{LabelSelector: selector} if pods, err := c.Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 { return true, nil @@ -2459,11 +2463,11 @@ func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) erro // Waits for the deployment to reach desired state. // Returns an error if minAvailable or maxCreated is broken at any times. 
-func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error { +func WaitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error { var oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet var newRS *extensions.ReplicaSet var deployment *extensions.Deployment - err := wait.Poll(poll, 5*time.Minute, func() (bool, error) { + err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) { var err error deployment, err = c.Extensions().Deployments(ns).Get(deploymentName) @@ -2519,10 +2523,10 @@ func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, d return nil } -// waitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback. +// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback. // Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early. -func waitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error { - err := wait.Poll(poll, 1*time.Minute, func() (bool, error) { +func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error { + err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err @@ -2539,12 +2543,12 @@ func waitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName return nil } -// waitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. +// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. // Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. 
-func waitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error { +func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error { var deployment *extensions.Deployment var newRS *extensions.ReplicaSet - err := wait.Poll(poll, 1*time.Minute, func() (bool, error) { + err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) { var err error deployment, err = c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { @@ -2571,8 +2575,8 @@ func waitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName return nil } -// checkNewRSAnnotations check if the new RS's annotation is as expected -func checkNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error { +// CheckNewRSAnnotations check if the new RS's annotation is as expected +func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return err @@ -2590,10 +2594,10 @@ func checkNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp return nil } -func waitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error { +func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := api.ListOptions{LabelSelector: label} - return wait.Poll(poll, 5*time.Minute, func() (bool, error) { + return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { pods, err := c.Pods(ns).List(options) if err != nil { return false, nil @@ -2608,8 +2612,8 @@ func waitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds i } // Waits for the deployment to clean up old rcs. -func waitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error { - return wait.Poll(poll, 5*time.Minute, func() (bool, error) { +func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error { + return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err @@ -2630,8 +2634,8 @@ func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []* Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, newRS, newRS.Spec.Selector) } -func waitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error { - return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, poll, 1*time.Minute) +func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error { + return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute) } func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) { @@ -2648,8 +2652,8 @@ func logPodsOfReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, m } // Waits for the number of events on the given object to reach a desired count. 
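// Usage sketch (illustrative only, not part of this diff): rolling out a new image on a
// Deployment and waiting for it with the exported helpers. The image and revision values
// are placeholders; the clientset/extensions imports are the same ones this file already uses.
func rollOutNewImage(cs *clientset.Clientset, ns, name string) error {
	d, err := framework.UpdateDeploymentWithRetries(cs, ns, name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "nginx:1.9"
	})
	if err != nil {
		return err
	}
	// Wait until the controller observes the update, then until the new RS carries the
	// expected revision annotation and image.
	if err := framework.WaitForObservedDeployment(cs, ns, name, d.Generation); err != nil {
		return err
	}
	return framework.WaitForDeploymentRevisionAndImage(cs, ns, name, "2", "nginx:1.9")
}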
-func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error { - return wait.Poll(poll, 5*time.Minute, func() (bool, error) { +func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error { + return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) @@ -2667,8 +2671,8 @@ func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired } // Waits for the number of events on the given object to be at least a desired count. -func waitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { - return wait.Poll(poll, 5*time.Minute, func() (bool, error) { +func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { + return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) @@ -2683,7 +2687,7 @@ func waitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, type updateDeploymentFunc func(d *extensions.Deployment) -func updateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { +func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { deployments := c.Extensions().Deployments(namespace) err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if deployment, err = deployments.Get(name); err != nil { @@ -2723,7 +2727,7 @@ func FailedContainers(pod *api.Pod) map[string]ContainerFailures { if state, ok = states[status.ContainerID]; !ok { state = ContainerFailures{} } - state.restarts = status.RestartCount + state.Restarts = status.RestartCount states[status.ContainerID] = state } } @@ -2816,7 +2820,7 @@ func SSH(cmd, host, provider string) (SSHResult, error) { result := SSHResult{Host: host, Cmd: cmd} // Get a signer for the provider. - signer, err := getSigner(provider) + signer, err := GetSigner(provider) if err != nil { return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err) } @@ -2844,7 +2848,7 @@ func LogSSHResult(result SSHResult) { Logf("ssh %s: exit code: %d", remote, result.Code) } -func issueSSHCommand(cmd, provider string, node *api.Node) error { +func IssueSSHCommand(cmd, provider string, node *api.Node) error { Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { @@ -2895,14 +2899,14 @@ func NewHostExecPodSpec(ns, name string) *api.Pod { // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. func RunHostCmd(ns, name, cmd string) (string, error) { - return runKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) + return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) } // RunHostCmdOrDie calls RunHostCmd and dies on error. 
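// Usage sketch (illustrative only, not part of this diff): running a command on a node over
// SSH and inside a pod via `kubectl exec`. The commands are placeholders, and host is
// expected to include the port, as elsewhere in this file.
func pokeNodeAndPod(ns, podName, host string) {
	result, err := framework.SSH("uptime", host, framework.TestContext.Provider)
	framework.LogSSHResult(result)
	framework.ExpectNoError(err, "SSH to %q failed", host)

	// podName would typically refer to a pod created with LaunchHostExecPod.
	out := framework.RunHostCmdOrDie(ns, podName, "cat /proc/loadavg")
	framework.Logf("load average reported by %s: %s", podName, out)
}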
func RunHostCmdOrDie(ns, name, cmd string) string { stdout, err := RunHostCmd(ns, name, cmd) Logf("stdout: %v", stdout) - expectNoError(err) + ExpectNoError(err) return stdout } @@ -2911,15 +2915,15 @@ func RunHostCmdOrDie(ns, name, cmd string) string { func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod { hostExecPod := NewHostExecPodSpec(ns, name) pod, err := client.Pods(ns).Create(hostExecPod) - expectNoError(err) - err = waitForPodRunningInNamespace(client, pod.Name, pod.Namespace) - expectNoError(err) + ExpectNoError(err) + err = WaitForPodRunningInNamespace(client, pod.Name, pod.Namespace) + ExpectNoError(err) return pod } -// getSigner returns an ssh.Signer for the provider ("gce", etc.) that can be +// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be // used to SSH to their nodes. -func getSigner(provider string) (ssh.Signer, error) { +func GetSigner(provider string) (ssh.Signer, error) { // Get the directory in which SSH keys are located. keydir := filepath.Join(os.Getenv("HOME"), ".ssh") @@ -2939,7 +2943,7 @@ func getSigner(provider string) (ssh.Signer, error) { // Otherwise revert to home dir keyfile = "kube_aws_rsa" default: - return nil, fmt.Errorf("getSigner(...) not implemented for %s", provider) + return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider) } key := filepath.Join(keydir, keyfile) @@ -2948,14 +2952,14 @@ func getSigner(provider string) (ssh.Signer, error) { // checkPodsRunning returns whether all pods whose names are listed in podNames // in namespace ns are running and ready, using c and waiting at most timeout. -func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool { +func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool { np, desc := len(podNames), "running and ready" Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) result := make(chan bool, len(podNames)) for ix := range podNames { // Launch off pod readiness checkers. go func(name string) { - err := waitForPodCondition(c, ns, name, desc, timeout, podRunningReady) + err := waitForPodCondition(c, ns, name, desc, timeout, PodRunningReady) result <- err == nil }(podNames[ix]) } @@ -2973,19 +2977,19 @@ func checkPodsRunningReady(c *client.Client, ns string, podNames []string, timeo return success } -// waitForNodeToBeReady returns whether node name is ready within timeout. -func waitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool { - return waitForNodeToBe(c, name, api.NodeReady, true, timeout) +// WaitForNodeToBeReady returns whether node name is ready within timeout. +func WaitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool { + return WaitForNodeToBe(c, name, api.NodeReady, true, timeout) } -// waitForNodeToBeNotReady returns whether node name is not ready (i.e. the +// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the // readiness condition is anything but ready, e.g false or unknown) within // timeout. 
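// Usage sketch (illustrative only, not part of this diff): readiness checks with the
// exported predicates. The 5-minute timeouts are placeholders.
func verifyPodsAndNode(c *client.Client, ns, nodeName string, podNames []string) {
	if !framework.CheckPodsRunningReady(c, ns, podNames, 5*time.Minute) {
		framework.Failf("pods %v did not become running and ready", podNames)
	}
	if !framework.WaitForNodeToBeReady(c, nodeName, 5*time.Minute) {
		framework.Failf("node %s did not become ready", nodeName)
	}
}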
-func waitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool { - return waitForNodeToBe(c, name, api.NodeReady, false, timeout) +func WaitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool { + return WaitForNodeToBe(c, name, api.NodeReady, false, timeout) } -func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool { +func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool { // Check the node readiness condition (logging all). for _, cond := range node.Status.Conditions { // Ensure that the condition type and the status matches as desired. @@ -3003,20 +3007,20 @@ func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditio return false } -// waitForNodeToBe returns whether node "name's" condition state matches wantTrue +// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue // within timeout. If wantTrue is true, it will ensure the node condition status // is ConditionTrue; if it's false, it ensures the node condition is in any state // other than ConditionTrue (e.g. not true or unknown). -func waitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool { +func WaitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool { Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { node, err := c.Nodes().Get(name) if err != nil { Logf("Couldn't get node %s", name) continue } - if isNodeConditionSetAsExpected(node, conditionType, wantTrue) { + if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) { return true } } @@ -3025,11 +3029,11 @@ func waitForNodeToBe(c *client.Client, name string, conditionType api.NodeCondit } // checks whether all registered nodes are ready -func allNodesReady(c *client.Client, timeout time.Duration) error { +func AllNodesReady(c *client.Client, timeout time.Duration) error { Logf("Waiting up to %v for all nodes to be ready", timeout) var notReady []api.Node - err := wait.PollImmediate(poll, timeout, func() (bool, error) { + err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. 
nodes, err := c.Nodes().List(api.ListOptions{}) @@ -3037,7 +3041,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error { return false, err } for _, node := range nodes.Items { - if !isNodeConditionSetAsExpected(&node, api.NodeReady, true) { + if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) { notReady = append(notReady, node) } } @@ -3057,7 +3061,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error { // Filters nodes in NodeList in place, removing nodes that do not // satisfy the given condition // TODO: consider merging with pkg/client/cache.NodeLister -func filterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) { +func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) { var l []api.Node for _, node := range nodeList.Items { @@ -3068,9 +3072,9 @@ func filterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) { nodeList.Items = l } -// parseKVLines parses output that looks like lines containing ": " +// ParseKVLines parses output that looks like lines containing ": " // and returns if is found. Otherwise, it returns the empty string. -func parseKVLines(output, key string) string { +func ParseKVLines(output, key string) string { delim := ":" key = key + delim for _, line := range strings.Split(output, "\n") { @@ -3086,20 +3090,20 @@ func parseKVLines(output, key string) string { return "" } -func restartKubeProxy(host string) error { +func RestartKubeProxy(host string) error { // TODO: Make it work for all providers. - if !providerIs("gce", "gke", "aws") { - return fmt.Errorf("unsupported provider: %s", testContext.Provider) + if !ProviderIs("gce", "gke", "aws") { + return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } // kubelet will restart the kube-proxy since it's running in a static pod - result, err := SSH("sudo pkill kube-proxy", host, testContext.Provider) + result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart kube-proxy: %v", err) } // wait for kube-proxy to come back up err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, testContext.Provider) + result, err := SSH("sudo /bin/sh -c 'pgrep kube-proxy | wc -l'", host, TestContext.Provider) if err != nil { return false, err } @@ -3119,18 +3123,18 @@ func restartKubeProxy(host string) error { return nil } -func restartApiserver() error { +func RestartApiserver() error { // TODO: Make it work for all providers. 
- if !providerIs("gce", "gke", "aws") { - return fmt.Errorf("unsupported provider: %s", testContext.Provider) + if !ProviderIs("gce", "gke", "aws") { + return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } var command string - if providerIs("gce", "gke") { + if ProviderIs("gce", "gke") { command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill" } else { command = "sudo /etc/init.d/kube-apiserver restart" } - result, err := SSH(command, getMasterHost()+":22", testContext.Provider) + result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart apiserver: %v", err) @@ -3138,7 +3142,7 @@ func restartApiserver() error { return nil } -func waitForApiserverUp(c *client.Client) error { +func WaitForApiserverUp(c *client.Client) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { body, err := c.Get().AbsPath("/healthz").Do().Raw() if err == nil && string(body) == "ok" { @@ -3148,9 +3152,9 @@ func waitForApiserverUp(c *client.Client) error { return fmt.Errorf("waiting for apiserver timed out") } -// waitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it. +// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it. // By cluster size we mean number of Nodes excluding Master Node. -func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error { +func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", @@ -3162,8 +3166,8 @@ func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error numNodes := len(nodes.Items) // Filter out not-ready nodes. - filterNodes(nodes, func(node api.Node) bool { - return isNodeConditionSetAsExpected(&node, api.NodeReady, true) + FilterNodes(nodes, func(node api.Node) bool { + return IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) numReady := len(nodes.Items) @@ -3176,10 +3180,10 @@ func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size) } -// getHostExternalAddress gets the node for a pod and returns the first External +// GetHostExternalAddress gets the node for a pod and returns the first External // address. Returns an error if the node the pod is on doesn't have an External // address. -func getHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) { +func GetHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) { node, err := client.Nodes().Get(p.Spec.NodeName) if err != nil { return "", err @@ -3272,8 +3276,8 @@ func getIngressAddress(client *client.Client, ns, name string) ([]string, error) return addresses, nil } -// waitForIngressAddress waits for the Ingress to acquire an address. -func waitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) { +// WaitForIngressAddress waits for the Ingress to acquire an address. 
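// Usage sketch (illustrative only, not part of this diff): a disruptive test restarting the
// apiserver and waiting for recovery. The provider gate mirrors the check inside
// RestartApiserver; the node count and timeout are placeholders.
func restartMasterAndWait(c *client.Client, expectedNodes int) {
	framework.SkipUnlessProviderIs("gce", "gke", "aws")
	framework.ExpectNoError(framework.RestartApiserver())
	framework.ExpectNoError(framework.WaitForApiserverUp(c))
	framework.ExpectNoError(framework.WaitForClusterSize(c, expectedNodes, 10*time.Minute))
}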
+func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) { var address string err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { ipOrNameList, err := getIngressAddress(c, ns, ingName) @@ -3288,34 +3292,34 @@ func waitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Du } // Looks for the given string in the log of a specific pod container -func lookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) { - return lookForString(expectedString, timeout, func() string { - return runKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns)) +func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) { + return LookForString(expectedString, timeout, func() string { + return RunKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns)) }) } // Looks for the given string in a file in a specific pod container -func lookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) { - return lookForString(expectedString, timeout, func() string { - return runKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file) +func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) { + return LookForString(expectedString, timeout, func() string { + return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file) }) } // Looks for the given string in the output of a command executed in a specific pod container -func lookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { - return lookForString(expectedString, timeout, func() string { +func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { + return LookForString(expectedString, timeout, func() string { // use the first container args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} args = append(args, command...) - return runKubectlOrDie(args...) + return RunKubectlOrDie(args...) }) } // Looks for the given string in the output of fn, repeatedly calling fn until // the timeout is reached or the string is found. Returns last log and possibly // error if the string was not found. -func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { - for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { +func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { + for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) { result = fn() if strings.Contains(result, expectedString) { return @@ -3342,8 +3346,8 @@ func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, e "No node port found for service %v, port %v", name, svcPort) } -// getNodePortURL returns the url to a nodeport Service. -func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) { +// GetNodePortURL returns the url to a nodeport Service. 
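// Usage sketch (illustrative only, not part of this diff): asserting on container output
// with the exported log helpers. The expected string and timeout are placeholders.
func expectLogLine(ns, podName, containerName string) {
	out, err := framework.LookForStringInLog(ns, podName, containerName, "serving on port 80", 2*time.Minute)
	if err != nil {
		framework.Failf("expected log line not found; last output:\n%s\nerror: %v", out, err)
	}
}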
+func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) { nodePort, err := getSvcNodePort(client, ns, name, svcPort) if err != nil { return "", err @@ -3352,7 +3356,7 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string // unschedulable, since the master doesn't run kube-proxy. Without // kube-proxy NodePorts won't work. var nodes *api.NodeList - if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { + if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) @@ -3375,9 +3379,9 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string return "", fmt.Errorf("Failed to find external address for service %v", name) } -// scaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till +// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. -func scaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error { +func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error { listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))} rcs, err := client.ReplicationControllers(ns).List(listOpts) if err != nil { @@ -3401,7 +3405,7 @@ func scaleRCByLabels(client *client.Client, ns string, l map[string]string, repl return err } } else { - if err := waitForPodsWithLabelRunning( + if err := WaitForPodsWithLabelRunning( client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil { return err } @@ -3410,7 +3414,7 @@ func scaleRCByLabels(client *client.Client, ns string, l map[string]string, repl return nil } -func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) { +func GetPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, false) } @@ -3440,21 +3444,21 @@ func getPodLogsInternal(c *client.Client, namespace, podName, containerName stri // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created // are actually cleaned up. Currently only implemented for GCE/GKE. 
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if TestContext.Provider == "gce" || TestContext.Provider == "gke" { return ensureGCELoadBalancerResourcesDeleted(ip, portRange) } return nil } func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { - gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud) + gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud) if !ok { - return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider) + return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider) } - project := testContext.CloudConfig.ProjectID - region, err := gcecloud.GetGCERegion(testContext.CloudConfig.Zone) + project := TestContext.CloudConfig.ProjectID + region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone) if err != nil { - return fmt.Errorf("could not get region for zone %q: %v", testContext.CloudConfig.Zone, err) + return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err) } return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { @@ -3486,22 +3490,22 @@ func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { // Suggested usage pattern: // func foo() { // ... -// defer unblockNetwork(from, to) -// blockNetwork(from, to) +// defer UnblockNetwork(from, to) +// BlockNetwork(from, to) // ... // } // -func blockNetwork(from string, to string) { +func BlockNetwork(from string, to string) { Logf("block network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule) - if result, err := SSH(dropCmd, from, testContext.Provider); result.Code != 0 || err != nil { + if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil { LogSSHResult(result) Failf("Unexpected error: %v", err) } } -func unblockNetwork(from string, to string) { +func UnblockNetwork(from string, to string) { Logf("Unblock network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule) @@ -3512,7 +3516,7 @@ func unblockNetwork(from string, to string) { // may fail). Manual intervention is required in such case (recreating the // cluster solves the problem too). 
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) { - result, err := SSH(undropCmd, from, testContext.Provider) + result, err := SSH(undropCmd, from, TestContext.Provider) if result.Code == 0 && err == nil { return true, nil } @@ -3537,7 +3541,7 @@ func isElementOf(podUID types.UID, pods *api.PodList) bool { return false } -func checkRSHashLabel(rs *extensions.ReplicaSet) error { +func CheckRSHashLabel(rs *extensions.ReplicaSet) error { if len(rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 || len(rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 || len(rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { @@ -3546,7 +3550,7 @@ func checkRSHashLabel(rs *extensions.ReplicaSet) error { return nil } -func checkPodHashLabel(pods *api.PodList) error { +func CheckPodHashLabel(pods *api.PodList) error { invalidPod := "" for _, pod := range pods.Items { if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { @@ -3568,8 +3572,8 @@ func GetReadyNodes(f *Framework) (nodes *api.NodeList, err error) { nodes = ListSchedulableNodesOrDie(f.Client) // previous tests may have cause failures of some nodes. Let's skip // 'Not Ready' nodes, just in case (there is no need to fail the test). - filterNodes(nodes, func(node api.Node) bool { - return !node.Spec.Unschedulable && isNodeConditionSetAsExpected(&node, api.NodeReady, true) + FilterNodes(nodes, func(node api.Node) bool { + return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) if len(nodes.Items) == 0 { @@ -3585,7 +3589,7 @@ const proxyTimeout = 2 * time.Minute func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. // This will leak a goroutine if proxy hangs. #22165 - subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) if err != nil { return restclient.Result{}, err } @@ -3671,10 +3675,10 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { } podClient := f.Client.Pods(f.Namespace.Name) _, err := podClient.Create(pod) - expectNoError(err) - expectNoError(f.WaitForPodRunning(podName)) + ExpectNoError(err) + ExpectNoError(f.WaitForPodRunning(podName)) createdPod, err := podClient.Get(podName) - expectNoError(err) + ExpectNoError(err) ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port) Logf("Target pod IP:port is %s", ip) return @@ -3710,13 +3714,13 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string) error return err } defer podClient.Delete(podName, nil) - return waitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name) + return WaitForPodSuccessInNamespace(f.Client, podName, contName, f.Namespace.Name) } // CoreDump SSHs to the master and all nodes and dumps their logs into dir. // It shells out to cluster/log-dump.sh to accomplish this. 
func CoreDump(dir string) { - cmd := exec.Command(path.Join(testContext.RepoRoot, "cluster", "log-dump.sh"), dir) + cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { diff --git a/test/e2e/garbage_collector.go b/test/e2e/garbage_collector.go index 462adc5dabc..707d8db2337 100644 --- a/test/e2e/garbage_collector.go +++ b/test/e2e/garbage_collector.go @@ -25,13 +25,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" ) // This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager // // Slow by design (7 min) -var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func() { - f := NewDefaultFramework("garbage-collector") +var _ = framework.KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func() { + f := framework.NewDefaultFramework("garbage-collector") It("should handle the creation of 1000 pods", func() { var count int for count < 1000 { @@ -40,16 +41,16 @@ var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func pod.Status.Phase = api.PodFailed pod, err = f.Client.Pods(f.Namespace.Name).UpdateStatus(pod) if err != nil { - Failf("err failing pod: %v", err) + framework.Failf("err failing pod: %v", err) } count++ if count%50 == 0 { - Logf("count: %v", count) + framework.Logf("count: %v", count) } } - Logf("created: %v", count) + framework.Logf("created: %v", count) // The gc controller polls every 30s and fires off a goroutine per // pod to terminate. @@ -62,22 +63,22 @@ var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { pods, err = f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}) if err != nil { - Logf("Failed to list pod %v", err) + framework.Logf("Failed to list pod %v", err) return false, nil } if len(pods.Items) != gcThreshold { - Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold) + framework.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold) return false, nil } return true, nil }) if pollErr != nil { - Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) + framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) } }) }) -func createTerminatingPod(f *Framework) (*api.Pod, error) { +func createTerminatingPod(f *framework.Framework) (*api.Pod, error) { uuid := util.NewUUID() pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ diff --git a/test/e2e/generated_clientset.go b/test/e2e/generated_clientset.go index a7ecd64ee1f..bc24c67c877 100644 --- a/test/e2e/generated_clientset.go +++ b/test/e2e/generated_clientset.go @@ -27,15 +27,16 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("Generated release_1_2 clientset", func() { - framework := NewDefaultFramework("clientset") +var _ = framework.KubeDescribe("Generated release_1_2 clientset", func() { + f := framework.NewDefaultFramework("clientset") It("should create pods, delete pods, watch pods", func() { - podClient := framework.Clientset_1_2.Core().Pods(framework.Namespace.Name) + podClient := f.Clientset_1_2.Core().Pods(f.Namespace.Name) By("creating the pod") name := "pod" + string(util.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -72,7 +73,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() { options := api.ListOptions{LabelSelector: selector} pods, err := podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(0)) options = api.ListOptions{ @@ -81,7 +82,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() { } w, err := podClient.Watch(options) if err != nil { - Failf("Failed to set up watch: %v", err) + framework.Failf("Failed to set up watch: %v", err) } By("submitting the pod to kubernetes") @@ -91,7 +92,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() { defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) pod, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } By("verifying the pod is in kubernetes") @@ -102,7 +103,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() { } pods, err = podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(1)) @@ -110,19 +111,19 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - Failf("Failed to observe pod creation: %v", event) + framework.Failf("Failed to observe pod creation: %v", event) } - case <-time.After(podStartTimeout): + case <-time.After(framework.PodStartTimeout): Fail("Timeout while waiting for pod creation") } // We need to wait for the pod to be scheduled, otherwise the deletion // will be carried out immediately rather than gracefully. 
- expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("deleting the pod gracefully") if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil { - Failf("Failed to delete pod: %v", err) + framework.Failf("Failed to delete pod: %v", err) } By("verifying pod deletion was observed") diff --git a/test/e2e/google_compute.go b/test/e2e/google_compute.go index 17fcd1c149b..c3588b3855e 100644 --- a/test/e2e/google_compute.go +++ b/test/e2e/google_compute.go @@ -24,6 +24,8 @@ import ( "time" "github.com/golang/glog" + + "k8s.io/kubernetes/test/e2e/framework" ) // TODO: These should really just use the GCE API client library or at least use @@ -36,12 +38,12 @@ func createGCEStaticIP(name string) (string, error) { // NAME REGION ADDRESS STATUS // test-static-ip us-central1 104.197.143.7 RESERVED - glog.Infof("Creating static IP with name %q in project %q", name, testContext.CloudConfig.ProjectID) + glog.Infof("Creating static IP with name %q in project %q", name, framework.TestContext.CloudConfig.ProjectID) var outputBytes []byte var err error for attempts := 0; attempts < 4; attempts++ { outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create", - name, "--project", testContext.CloudConfig.ProjectID, + name, "--project", framework.TestContext.CloudConfig.ProjectID, "--region", "us-central1", "-q").CombinedOutput() if err == nil { break @@ -76,7 +78,7 @@ func deleteGCEStaticIP(name string) error { // test-static-ip us-central1 104.197.143.7 RESERVED outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete", - name, "--project", testContext.CloudConfig.ProjectID, + name, "--project", framework.TestContext.CloudConfig.ProjectID, "--region", "us-central1", "-q").CombinedOutput() if err != nil { // Ditch the error, since the stderr in the output is what actually contains diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go index 064f2cfc47a..a6be6185e2b 100644 --- a/test/e2e/horizontal_pod_autoscaling.go +++ b/test/e2e/horizontal_pod_autoscaling.go @@ -21,6 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) @@ -34,15 +35,15 @@ const ( // These tests don't seem to be running properly in parallel: issue: #20338. // -var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() { +var _ = framework.KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() { var rc *ResourceConsumer - f := NewDefaultFramework("horizontal-pod-autoscaling") + f := framework.NewDefaultFramework("horizontal-pod-autoscaling") titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability" titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability" // These tests take ~20 minutes each. - KubeDescribe("[Serial] [Slow] Deployment", func() { + framework.KubeDescribe("[Serial] [Slow] Deployment", func() { // CPU tests via deployments It(titleUp, func() { scaleUp("test-deployment", kindDeployment, rc, f) @@ -53,7 +54,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() }) // These tests take ~20 minutes each. 
- KubeDescribe("[Serial] [Slow] ReplicaSet", func() { + framework.KubeDescribe("[Serial] [Slow] ReplicaSet", func() { // CPU tests via deployments It(titleUp, func() { scaleUp("rs", kindReplicaSet, rc, f) @@ -63,7 +64,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() }) }) // These tests take ~20 minutes each. - KubeDescribe("[Serial] [Slow] ReplicationController", func() { + framework.KubeDescribe("[Serial] [Slow] ReplicationController", func() { // CPU tests via replication controllers It(titleUp, func() { scaleUp("rc", kindRC, rc, f) @@ -73,7 +74,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() }) }) - KubeDescribe("ReplicationController light", func() { + framework.KubeDescribe("ReplicationController light", func() { It("Should scale from 1 pod to 2 pods", func() { scaleTest := &HPAScaleTest{ initPods: 1, @@ -123,7 +124,7 @@ type HPAScaleTest struct { // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts. // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. -func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) { +func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *framework.Framework) { rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 100, f) defer rc.CleanUp() createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods, scaleTest.useV1) @@ -137,7 +138,7 @@ func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *F } } -func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) { +func scaleUp(name, kind string, rc *ResourceConsumer, f *framework.Framework) { scaleTest := &HPAScaleTest{ initPods: 1, totalInitialCPUUsage: 250, @@ -153,7 +154,7 @@ func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) { scaleTest.run(name, kind, rc, f) } -func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) { +func scaleDown(name, kind string, rc *ResourceConsumer, f *framework.Framework) { scaleTest := &HPAScaleTest{ initPods: 5, totalInitialCPUUsage: 400, @@ -192,5 +193,5 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma } else { _, errHPA = rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) } - expectNoError(errHPA) + framework.ExpectNoError(errHPA) } diff --git a/test/e2e/host_path.go b/test/e2e/host_path.go index ef7bbbbc7b4..58a5a1dc848 100644 --- a/test/e2e/host_path.go +++ b/test/e2e/host_path.go @@ -25,20 +25,21 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) //TODO : Consolidate this code with the code for emptyDir. //This will require some smart. 
-var _ = KubeDescribe("hostPath", func() { - framework := NewDefaultFramework("hostpath") +var _ = framework.KubeDescribe("hostPath", func() { + f := framework.NewDefaultFramework("hostpath") var c *client.Client var namespace *api.Namespace BeforeEach(func() { - c = framework.Client - namespace = framework.Namespace + c = f.Client + namespace = f.Namespace //cleanup before running the test. _ = os.Remove("/tmp/test-file") @@ -55,7 +56,7 @@ var _ = KubeDescribe("hostPath", func() { fmt.Sprintf("--fs_type=%v", volumePath), fmt.Sprintf("--file_mode=%v", volumePath), } - testContainerOutput("hostPath mode", c, pod, 0, []string{ + framework.TestContainerOutput("hostPath mode", c, pod, 0, []string{ "mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir }, namespace.Name) @@ -82,7 +83,7 @@ var _ = KubeDescribe("hostPath", func() { } //Read the content of the file with the second container to //verify volumes being shared properly among containers within the pod. - testContainerOutput("hostPath r/w", c, pod, 1, []string{ + framework.TestContainerOutput("hostPath r/w", c, pod, 1, []string{ "content of file \"/test-volume/test-file\": mount-tester new file", }, namespace.Name, ) diff --git a/test/e2e/ingress.go b/test/e2e/ingress.go index 47e974ba44e..7e7bf2cda9d 100644 --- a/test/e2e/ingress.go +++ b/test/e2e/ingress.go @@ -34,6 +34,7 @@ import ( utilexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -79,7 +80,7 @@ var ( verifyHTTPGET = true // On average it takes ~6 minutes for a single backend to come online. - // We *don't* expect this poll to consistently take 15 minutes for every + // We *don't* expect this framework.Poll to consistently take 15 minutes for every // Ingress as GCE is creating/checking backends in parallel, but at the // same time, we're not testing GCE startup latency. So give it enough // time, and fail if the average is too high. 
@@ -181,13 +182,13 @@ func createApp(c *client.Client, ns string, i int) { name := fmt.Sprintf("%v%d", appPrefix, i) l := map[string]string{} - Logf("Creating svc %v", name) + framework.Logf("Creating svc %v", name) svc := svcByName(name, httpContainerPort) svc.Spec.Type = api.ServiceTypeNodePort _, err := c.Services(ns).Create(svc) Expect(err).NotTo(HaveOccurred()) - Logf("Creating rc %v", name) + framework.Logf("Creating rc %v", name) rc := rcByNamePort(name, 1, testImage, httpContainerPort, api.ProtocolTCP, l) rc.Spec.Template.Spec.Containers[0].Args = []string{ "--num=1", @@ -215,19 +216,19 @@ func gcloudUnmarshal(resource, regex, project string, out interface{}) { if exitErr, ok := err.(utilexec.ExitError); ok { errCode = exitErr.ExitStatus() } - Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode) + framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode) } if err := json.Unmarshal([]byte(output), out); err != nil { - Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output)) + framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output)) } } func gcloudDelete(resource, name, project string) { - Logf("Deleting %v: %v", resource, name) + framework.Logf("Deleting %v: %v", resource, name) output, err := exec.Command("gcloud", "compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q").CombinedOutput() if err != nil { - Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err) + framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err) } } @@ -237,17 +238,17 @@ func kubectlLogLBController(c *client.Client, ns string) { options := api.ListOptions{LabelSelector: selector} podList, err := c.Pods(api.NamespaceAll).List(options) if err != nil { - Logf("Cannot log L7 controller output, error listing pods %v", err) + framework.Logf("Cannot log L7 controller output, error listing pods %v", err) return } if len(podList.Items) == 0 { - Logf("Loadbalancer controller pod not found") + framework.Logf("Loadbalancer controller pod not found") return } for _, p := range podList.Items { - Logf("\nLast 100 log lines of %v\n", p.Name) - l, _ := runKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100") - Logf(l) + framework.Logf("\nLast 100 log lines of %v\n", p.Name) + l, _ := framework.RunKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100") + framework.Logf(l) } } @@ -270,7 +271,7 @@ func (cont *IngressController) create() { // for issues like #16337. Currently, all names should fall within 63 chars. 
testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID) if len(testName) > nameLenLimit { - Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v", + framework.Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v", cont.ns, cont.UID, nameLenLimit) } @@ -289,7 +290,7 @@ func (cont *IngressController) create() { existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts) Expect(err).NotTo(HaveOccurred()) if len(existingRCs.Items) != 1 { - Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels) + framework.Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels) } // Merge the existing spec and new spec. The modifications should not @@ -321,7 +322,7 @@ func (cont *IngressController) create() { cont.rc = rc _, err = cont.c.ReplicationControllers(cont.ns).Create(cont.rc) Expect(err).NotTo(HaveOccurred()) - Expect(waitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred()) + Expect(framework.WaitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred()) } func (cont *IngressController) Cleanup(del bool) error { @@ -336,11 +337,11 @@ func (cont *IngressController) Cleanup(del bool) error { for _, f := range fwList { msg += fmt.Sprintf("%v\n", f.Name) if del { - Logf("Deleting forwarding-rule: %v", f.Name) + framework.Logf("Deleting forwarding-rule: %v", f.Name) output, err := exec.Command("gcloud", "compute", "forwarding-rules", "delete", f.Name, fmt.Sprintf("--project=%v", cont.Project), "-q", "--global").CombinedOutput() if err != nil { - Logf("Error deleting forwarding rules, output: %v\nerror:%v", string(output), err) + framework.Logf("Error deleting forwarding rules, output: %v\nerror:%v", string(output), err) } } } @@ -440,7 +441,7 @@ func (cont *IngressController) Cleanup(del bool) error { // test requires at least 5. // // Slow by design (10 min) -var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() { +var _ = framework.KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() { // These variables are initialized after framework's beforeEach. var ns string var addonDir string @@ -448,18 +449,18 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() var responseTimes, creationTimes []time.Duration var ingController *IngressController - framework := Framework{BaseName: "glbc"} + f := framework.Framework{BaseName: "glbc"} BeforeEach(func() { // This test requires a GCE/GKE only cluster-addon - SkipUnlessProviderIs("gce", "gke") - framework.beforeEach() - client = framework.Client - ns = framework.Namespace.Name + framework.SkipUnlessProviderIs("gce", "gke") + f.BeforeEach() + client = f.Client + ns = f.Namespace.Name // Scaled down the existing Ingress controller so it doesn't interfere with the test. 
- Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred()) + Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred()) addonDir = filepath.Join( - testContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc") + framework.TestContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc") nsParts := strings.Split(ns, "-") ingController = &IngressController{ @@ -467,13 +468,13 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() // The UID in the namespace was generated by the master, so it's // global to the cluster. UID: nsParts[len(nsParts)-1], - Project: testContext.CloudConfig.ProjectID, + Project: framework.TestContext.CloudConfig.ProjectID, rcPath: filepath.Join(addonDir, "glbc-controller.yaml"), defaultSvcPath: filepath.Join(addonDir, "default-svc.yaml"), c: client, } ingController.create() - Logf("Finished creating ingress controller") + framework.Logf("Finished creating ingress controller") // If we somehow get the same namespace uid as someone else in this // gce project, just back off. Expect(ingController.Cleanup(false)).NotTo(HaveOccurred()) @@ -482,47 +483,47 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() }) AfterEach(func() { - Logf("Average creation time %+v, health check time %+v", creationTimes, responseTimes) + framework.Logf("Average creation time %+v, health check time %+v", creationTimes, responseTimes) if CurrentGinkgoTestDescription().Failed { kubectlLogLBController(client, ns) - Logf("\nOutput of kubectl describe ing:\n") - desc, _ := runKubectl("describe", "ing", fmt.Sprintf("--namespace=%v", ns)) - Logf(desc) + framework.Logf("\nOutput of kubectl describe ing:\n") + desc, _ := framework.RunKubectl("describe", "ing", fmt.Sprintf("--namespace=%v", ns)) + framework.Logf(desc) } // Delete all Ingress, then wait for the controller to cleanup. ings, err := client.Extensions().Ingress(ns).List(api.ListOptions{}) if err != nil { - Logf("WARNING: Failed to list ingress: %+v", err) + framework.Logf("WARNING: Failed to list ingress: %+v", err) } else { for _, ing := range ings.Items { - Logf("Deleting ingress %v/%v", ing.Namespace, ing.Name) + framework.Logf("Deleting ingress %v/%v", ing.Namespace, ing.Name) if err := client.Extensions().Ingress(ns).Delete(ing.Name, nil); err != nil { - Logf("WARNING: Failed to delete ingress %v: %v", ing.Name, err) + framework.Logf("WARNING: Failed to delete ingress %v: %v", ing.Name, err) } } } pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { if err := ingController.Cleanup(false); err != nil { - Logf("Still waiting for glbc to cleanup: %v", err) + framework.Logf("Still waiting for glbc to cleanup: %v", err) return false, nil } return true, nil }) // TODO: Remove this once issue #17802 is fixed - Expect(scaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred()) + Expect(framework.ScaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred()) // If the controller failed to cleanup the test will fail, but we want to cleanup // resources before that. 
if pollErr != nil { if cleanupErr := ingController.Cleanup(true); cleanupErr != nil { - Logf("WARNING: Failed to cleanup resources %v", cleanupErr) + framework.Logf("WARNING: Failed to cleanup resources %v", cleanupErr) } - Failf("Failed to cleanup GCE L7 resources.") + framework.Failf("Failed to cleanup GCE L7 resources.") } // Restore the cluster Addon. - Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred()) - framework.afterEach() - Logf("Successfully verified GCE L7 loadbalancer via Ingress.") + Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred()) + f.AfterEach() + framework.Logf("Successfully verified GCE L7 loadbalancer via Ingress.") }) It("should create GCE L7 loadbalancers and verify Ingress", func() { @@ -536,9 +537,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() // foo0.bar.com: /foo0 // foo1.bar.com: /foo1 if numApps < numIng { - Failf("Need more apps than Ingress") + framework.Failf("Need more apps than Ingress") } - Logf("Starting ingress test") + framework.Logf("Starting ingress test") appsPerIngress := numApps / numIng By(fmt.Sprintf("Creating %d rcs + svc, and %d apps per Ingress", numApps, appsPerIngress)) @@ -569,9 +570,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() for _, ing := range ings.Items { // Wait for the loadbalancer IP. start := time.Now() - address, err := waitForIngressAddress(client, ing.Namespace, ing.Name, lbPollTimeout) + address, err := framework.WaitForIngressAddress(client, ing.Namespace, ing.Name, lbPollTimeout) if err != nil { - Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout) + framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout) } Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Found address %v for ingress %v, took %v to come online", @@ -592,9 +593,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() for _, p := range rules.IngressRuleValue.HTTP.Paths { route := fmt.Sprintf("https://%v%v", address, p.Path) - Logf("Testing route %v host %v with simple GET", route, rules.Host) + framework.Logf("Testing route %v host %v with simple GET", route, rules.Host) if err != nil { - Failf("Unable to create transport: %v", err) + framework.Failf("Unable to create transport: %v", err) } // Make sure the service node port is reachable Expect(curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))).NotTo(HaveOccurred()) @@ -605,7 +606,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() var err error lastBody, err = simpleGET(timeoutClient, route, rules.Host) if err != nil { - Logf("host %v path %v: %v", rules.Host, route, err) + framework.Logf("host %v path %v: %v", rules.Host, route, err) return false, nil } return true, nil @@ -618,7 +619,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() if err := curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil { msg += fmt.Sprintf("Also unable to curl service node port: %v", err) } - Failf(msg) + framework.Failf(msg) } rt := time.Since(GETStart) By(fmt.Sprintf("Route %v host %v took %v to respond", route, rules.Host, rt)) @@ -632,7 +633,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() sort.Sort(timeSlice(creationTimes)) perc50 := creationTimes[len(creationTimes)/2] if perc50 
> expectedLBCreationTime { - Logf("WARNING: Average creation time is too high: %+v", creationTimes) + framework.Logf("WARNING: Average creation time is too high: %+v", creationTimes) } if !verifyHTTPGET { return @@ -640,14 +641,14 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() sort.Sort(timeSlice(responseTimes)) perc50 = responseTimes[len(responseTimes)/2] if perc50 > expectedLBHealthCheckTime { - Logf("WARNING: Average startup time is too high: %+v", responseTimes) + framework.Logf("WARNING: Average startup time is too high: %+v", responseTimes) } }) }) func curlServiceNodePort(client *client.Client, ns, name string, port int) error { // TODO: Curl all nodes? - u, err := getNodePortURL(client, ns, name, port) + u, err := framework.GetNodePortURL(client, ns, name, port) if err != nil { return err } @@ -656,7 +657,7 @@ func curlServiceNodePort(client *client.Client, ns, name string, port int) error pollErr := wait.Poll(10*time.Second, timeout, func() (bool, error) { svcCurlBody, err = simpleGET(timeoutClient, u, "") if err != nil { - Logf("Failed to curl service node port, body: %v\nerror %v", svcCurlBody, err) + framework.Logf("Failed to curl service node port, body: %v\nerror %v", svcCurlBody, err) return false, nil } return true, nil @@ -664,6 +665,6 @@ func curlServiceNodePort(client *client.Client, ns, name string, port int) error if pollErr != nil { return fmt.Errorf("Failed to curl service node port in %v, body: %v\nerror %v", timeout, svcCurlBody, err) } - Logf("Successfully curled service node port, body: %v", svcCurlBody) + framework.Logf("Successfully curled service node port, body: %v", svcCurlBody) return nil } diff --git a/test/e2e/ingress_utils.go b/test/e2e/ingress_utils.go index 49fb3af6ff8..2f3f54cfad8 100644 --- a/test/e2e/ingress_utils.go +++ b/test/e2e/ingress_utils.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" utilnet "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -125,7 +126,7 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri var k, c bytes.Buffer tls := ing.Spec.TLS[0] host = strings.Join(tls.Hosts, ",") - Logf("Generating RSA cert for host %v", host) + framework.Logf("Generating RSA cert for host %v", host) if err = generateRSACerts(host, true, &k, &c); err != nil { return @@ -141,7 +142,7 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri api.TLSPrivateKeyKey: key, }, } - Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) + framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) _, err = kubeClient.Secrets(ing.Namespace).Create(secret) return host, cert, key, err } diff --git a/test/e2e/initial_resources.go b/test/e2e/initial_resources.go index e9b2bb47263..8a6322075c3 100644 --- a/test/e2e/initial_resources.go +++ b/test/e2e/initial_resources.go @@ -23,14 +23,15 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/test/e2e/framework" ) // [Feature:InitialResources]: Initial resources is an experimental feature, so // these tests are not run by default. 
// // Flaky issue #20272 -var _ = KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", func() { - f := NewDefaultFramework("initial-resources") +var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", func() { + f := framework.NewDefaultFramework("initial-resources") It("should set initial resources based on historical data", func() { // TODO(piosz): Add cleanup data in InfluxDB that left from previous tests. @@ -50,7 +51,7 @@ var _ = KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", fun }) }) -func runPod(f *Framework, name, image string) *api.Pod { +func runPod(f *framework.Framework, name, image string) *api.Pod { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, @@ -65,7 +66,7 @@ func runPod(f *Framework, name, image string) *api.Pod { }, } createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod) - expectNoError(err) - expectNoError(waitForPodRunningInNamespace(f.Client, name, f.Namespace.Name)) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, name, f.Namespace.Name)) return createdPod } diff --git a/test/e2e/job.go b/test/e2e/job.go index 87fb829c26d..80c2510f22f 100644 --- a/test/e2e/job.go +++ b/test/e2e/job.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -39,8 +40,8 @@ const ( jobSelectorKey = "job" ) -var _ = KubeDescribe("Job", func() { - f := NewDefaultFramework("job") +var _ = framework.KubeDescribe("Job", func() { + f := framework.NewDefaultFramework("job") parallelism := 2 completions := 4 lotsOfFailures := 5 // more than completions @@ -101,7 +102,7 @@ var _ = KubeDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring job shows many failures") - err = wait.Poll(poll, jobTimeout, func() (bool, error) { + err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { curr, err := f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name) if err != nil { return false, err @@ -271,7 +272,7 @@ func deleteJob(c *client.Client, ns, name string) error { // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) - return wait.Poll(poll, jobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} pods, err := c.Pods(ns).List(options) if err != nil { @@ -289,7 +290,7 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int // Wait for job to reach completions. func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error { - return wait.Poll(poll, jobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { curr, err := c.Extensions().Jobs(ns).Get(jobName) if err != nil { return false, err @@ -300,7 +301,7 @@ func waitForJobFinish(c *client.Client, ns, jobName string, completions int) err // Wait for job fail. 
func waitForJobFail(c *client.Client, ns, jobName string) error { - return wait.Poll(poll, jobTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { curr, err := c.Extensions().Jobs(ns).Get(jobName) if err != nil { return false, err diff --git a/test/e2e/kibana_logging.go b/test/e2e/kibana_logging.go index 5a78ccbff5f..a5defbc0443 100644 --- a/test/e2e/kibana_logging.go +++ b/test/e2e/kibana_logging.go @@ -21,19 +21,20 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("Kibana Logging Instances Is Alive", func() { - f := NewDefaultFramework("kibana-logging") +var _ = framework.KubeDescribe("Kibana Logging Instances Is Alive", func() { + f := framework.NewDefaultFramework("kibana-logging") BeforeEach(func() { // TODO: For now assume we are only testing cluster logging with Elasticsearch // and Kibana on GCE. Once we are sure that Elasticsearch and Kibana cluster level logging // works for other providers we should widen this scope of this test. - SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") }) It("should check that the Kibana logging instance is alive", func() { @@ -47,7 +48,7 @@ const ( ) // ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive. -func ClusterLevelLoggingWithKibana(f *Framework) { +func ClusterLevelLoggingWithKibana(f *framework.Framework) { // graceTime is how long to keep retrying requests for status information. const graceTime = 2 * time.Minute @@ -61,7 +62,7 @@ func ClusterLevelLoggingWithKibana(f *Framework) { if _, err = s.Get("kibana-logging"); err == nil { break } - Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start)) + framework.Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start)) } Expect(err).NotTo(HaveOccurred()) @@ -72,16 +73,16 @@ func ClusterLevelLoggingWithKibana(f *Framework) { pods, err := f.Client.Pods(api.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) + err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) Expect(err).NotTo(HaveOccurred()) } By("Checking to make sure we get a response from the Kibana UI.") err = nil for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { - Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) + framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue } // Query against the root URL for Kibana. @@ -89,7 +90,7 @@ func ClusterLevelLoggingWithKibana(f *Framework) { Name("kibana-logging"). 
DoRaw() if err != nil { - Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err) + framework.Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err) continue } break diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index dad906a8407..ed65d4593be 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -48,6 +48,7 @@ import ( utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -111,135 +112,135 @@ var ( podProbeParametersVersion = version.MustParse("v1.2.0-alpha.4") ) -var _ = KubeDescribe("Kubectl client", func() { +var _ = framework.KubeDescribe("Kubectl client", func() { defer GinkgoRecover() - framework := NewDefaultFramework("kubectl") + f := framework.NewDefaultFramework("kubectl") var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name }) - KubeDescribe("Update Demo", func() { + framework.KubeDescribe("Update Demo", func() { var updateDemoRoot, nautilusPath, kittenPath string BeforeEach(func() { - updateDemoRoot = filepath.Join(testContext.RepoRoot, "docs/user-guide/update-demo") + updateDemoRoot = filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/update-demo") nautilusPath = filepath.Join(updateDemoRoot, "nautilus-rc.yaml") kittenPath = filepath.Join(updateDemoRoot, "kitten-rc.yaml") }) It("should create and stop a replication controller [Conformance]", func() { - defer cleanup(nautilusPath, ns, updateDemoSelector) + defer framework.Cleanup(nautilusPath, ns, updateDemoSelector) By("creating a replication controller") - runKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + framework.RunKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) It("should scale a replication controller [Conformance]", func() { - defer cleanup(nautilusPath, ns, updateDemoSelector) + defer framework.Cleanup(nautilusPath, ns, updateDemoSelector) By("creating a replication controller") - runKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + framework.RunKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) By("scaling down the replication controller") - runKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) - validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) By("scaling up the replication controller") - runKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) - validateController(c, 
nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) It("should do a rolling update of a replication controller [Conformance]", func() { By("creating the initial replication controller") - runKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) + framework.RunKubectlOrDie("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) By("rolling-update to new replication controller") - runKubectlOrDie("rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", kittenPath, fmt.Sprintf("--namespace=%v", ns)) - validateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) + framework.RunKubectlOrDie("rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", kittenPath, fmt.Sprintf("--namespace=%v", ns)) + framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) // Everything will hopefully be cleaned up when the namespace is deleted. }) }) - KubeDescribe("Guestbook application", func() { + framework.KubeDescribe("Guestbook application", func() { var guestbookPath string BeforeEach(func() { - guestbookPath = filepath.Join(testContext.RepoRoot, "examples/guestbook") + guestbookPath = filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook") }) It("should create and stop a working application [Conformance]", func() { - SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) + framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) - defer cleanup(guestbookPath, ns, frontendSelector, redisMasterSelector, redisSlaveSelector) + defer framework.Cleanup(guestbookPath, ns, frontendSelector, redisMasterSelector, redisSlaveSelector) By("creating all guestbook components") - runKubectlOrDie("create", "-f", guestbookPath, fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDie("create", "-f", guestbookPath, fmt.Sprintf("--namespace=%v", ns)) By("validating guestbook app") validateGuestbookApp(c, ns) }) }) - KubeDescribe("Simple pod", func() { + framework.KubeDescribe("Simple pod", func() { var podPath string BeforeEach(func() { - podPath = filepath.Join(testContext.RepoRoot, "test", "e2e", "testing-manifests", "kubectl", "pod-with-readiness-probe.yaml") + podPath = filepath.Join(framework.TestContext.RepoRoot, "test", "e2e", "testing-manifests", "kubectl", "pod-with-readiness-probe.yaml") By(fmt.Sprintf("creating the pod from %v", podPath)) - runKubectlOrDie("create", "-f", podPath, fmt.Sprintf("--namespace=%v", ns)) - checkPodsRunningReady(c, ns, []string{simplePodName}, podStartTimeout) + framework.RunKubectlOrDie("create", "-f", podPath, fmt.Sprintf("--namespace=%v", ns)) + framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout) }) AfterEach(func() { - cleanup(podPath, ns, simplePodSelector) + framework.Cleanup(podPath, ns, simplePodSelector) }) It("should support exec", func() { By("executing a command in the container") - execOutput := runKubectlOrDie("exec", 
fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") + execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") if e, a := "running in container", execOutput; e != a { - Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } By("executing a command in the container with noninteractive stdin") - execOutput = newKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). - withStdinData("abcd1234"). - execOrDie() + execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). + WithStdinData("abcd1234"). + ExecOrDie() if e, a := "abcd1234", execOutput; e != a { - Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } // pretend that we're a user in an interactive shell r, closer, err := newBlockingReader("echo hi\nexit\n") if err != nil { - Failf("Error creating blocking reader: %v", err) + framework.Failf("Error creating blocking reader: %v", err) } // NOTE this is solely for test cleanup! defer closer.Close() By("executing a command in the container with pseudo-interactive stdin") - execOutput = newKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash"). - withStdinReader(r). - execOrDie() + execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash"). + WithStdinReader(r). + ExecOrDie() if e, a := "hi", execOutput; e != a { - Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } }) It("should support exec through an HTTP proxy", func() { // Note: We are skipping local since we want to verify an apiserver with HTTPS. // At this time local only supports plain HTTP. - SkipIfProviderIs("local") + framework.SkipIfProviderIs("local") // Fail if the variable isn't set - if testContext.Host == "" { - Failf("--host variable must be set to the full URI to the api server on e2e run.") + if framework.TestContext.Host == "" { + framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") } // Make sure the apiServer is set to what kubectl requires - apiServer := testContext.Host + apiServer := framework.TestContext.Host apiServerUrl, err := url.Parse(apiServer) if err != nil { - Failf("Unable to parse URL %s. Error=%s", apiServer, err) + framework.Failf("Unable to parse URL %s. Error=%s", apiServer, err) } apiServerUrl.Scheme = "https" if !strings.Contains(apiServer, ":443") { @@ -251,49 +252,49 @@ var _ = KubeDescribe("Kubectl client", func() { By("Finding a static kubectl for upload") testStaticKubectlPath, err := findBinary("kubectl", "linux/386") if err != nil { - Logf("No kubectl found: %v.\nAttempting a local build...", err) + framework.Logf("No kubectl found: %v.\nAttempting a local build...", err) // Fall back to trying to build a local static kubectl - kubectlContainerPath := path.Join(testContext.RepoRoot, "/examples/kubectl-container/") - if _, err := os.Stat(path.Join(testContext.RepoRoot, "hack/build-go.sh")); err != nil { - Failf("Can't build static kubectl due to missing hack/build-go.sh. 
Error=%s", err) + kubectlContainerPath := path.Join(framework.TestContext.RepoRoot, "/examples/kubectl-container/") + if _, err := os.Stat(path.Join(framework.TestContext.RepoRoot, "hack/build-go.sh")); err != nil { + framework.Failf("Can't build static kubectl due to missing hack/build-go.sh. Error=%s", err) } By("Building a static kubectl for upload") staticKubectlBuild := exec.Command("make", "-C", kubectlContainerPath) if out, err := staticKubectlBuild.Output(); err != nil { - Failf("Unable to create static kubectl. Error=%s, Output=%q", err, out) + framework.Failf("Unable to create static kubectl. Error=%s, Output=%q", err, out) } // Verify the static kubectl path testStaticKubectlPath = path.Join(kubectlContainerPath, "kubectl") _, err := os.Stat(testStaticKubectlPath) if err != nil { - Failf("static kubectl path could not be found in %s. Error=%s", testStaticKubectlPath, err) + framework.Failf("static kubectl path could not be found in %s. Error=%s", testStaticKubectlPath, err) } } By(fmt.Sprintf("Using the kubectl in %s", testStaticKubectlPath)) // Verify the kubeconfig path - kubeConfigFilePath := testContext.KubeConfig + kubeConfigFilePath := framework.TestContext.KubeConfig _, err = os.Stat(kubeConfigFilePath) if err != nil { - Failf("kube config path could not be accessed. Error=%s", err) + framework.Failf("kube config path could not be accessed. Error=%s", err) } // start exec-proxy-tester container - netexecPodPath := filepath.Join(testContext.RepoRoot, "test/images/netexec/pod.yaml") + netexecPodPath := filepath.Join(framework.TestContext.RepoRoot, "test/images/netexec/pod.yaml") // Add "validate=false" if the server version is less than 1.2. // More details: https://github.com/kubernetes/kubernetes/issues/22884. validateFlag := "--validate=true" - gte, err := serverVersionGTE(podProbeParametersVersion, c) + gte, err := framework.ServerVersionGTE(podProbeParametersVersion, c) if err != nil { - Failf("Failed to get server version: %v", err) + framework.Failf("Failed to get server version: %v", err) } if !gte { validateFlag = "--validate=false" } - runKubectlOrDie("create", "-f", netexecPodPath, fmt.Sprintf("--namespace=%v", ns), validateFlag) - checkPodsRunningReady(c, ns, []string{netexecContainer}, podStartTimeout) + framework.RunKubectlOrDie("create", "-f", netexecPodPath, fmt.Sprintf("--namespace=%v", ns), validateFlag) + framework.CheckPodsRunningReady(c, ns, []string{netexecContainer}, framework.PodStartTimeout) // Clean up - defer cleanup(netexecPodPath, ns, netexecPodSelector) + defer framework.Cleanup(netexecPodPath, ns, netexecPodSelector) // Upload kubeconfig type NetexecOutput struct { Output string `json:"output"` @@ -305,12 +306,12 @@ var _ = KubeDescribe("Kubectl client", func() { By("uploading kubeconfig to netexec") pipeConfigReader, postConfigBodyWriter, err := newStreamingUpload(kubeConfigFilePath) if err != nil { - Failf("unable to create streaming upload. Error: %s", err) + framework.Failf("unable to create streaming upload. Error: %s", err) } - subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, c) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) if err != nil { - Failf("Unable to determine server version. Error: %s", err) + framework.Failf("Unable to determine server version. 
Error: %s", err) } var resp []byte @@ -336,18 +337,18 @@ var _ = KubeDescribe("Kubectl client", func() { Do().Raw() } if err != nil { - Failf("Unable to upload kubeconfig to the remote exec server due to error: %s", err) + framework.Failf("Unable to upload kubeconfig to the remote exec server due to error: %s", err) } if err := json.Unmarshal(resp, &uploadConfigOutput); err != nil { - Failf("Unable to read the result from the netexec server. Error: %s", err) + framework.Failf("Unable to read the result from the netexec server. Error: %s", err) } kubecConfigRemotePath := uploadConfigOutput.Output // Upload pipeReader, postBodyWriter, err := newStreamingUpload(testStaticKubectlPath) if err != nil { - Failf("unable to create streaming upload. Error: %s", err) + framework.Failf("unable to create streaming upload. Error: %s", err) } By("uploading kubectl to netexec") @@ -375,35 +376,35 @@ var _ = KubeDescribe("Kubectl client", func() { Do().Raw() } if err != nil { - Failf("Unable to upload kubectl binary to the remote exec server due to error: %s", err) + framework.Failf("Unable to upload kubectl binary to the remote exec server due to error: %s", err) } if err := json.Unmarshal(resp, &uploadOutput); err != nil { - Failf("Unable to read the result from the netexec server. Error: %s", err) + framework.Failf("Unable to read the result from the netexec server. Error: %s", err) } uploadBinaryName := uploadOutput.Output // Verify that we got the expected response back in the body if !strings.HasPrefix(uploadBinaryName, "/uploads/") { - Failf("Unable to upload kubectl binary to remote exec server. /uploads/ not in response. Response: %s", uploadBinaryName) + framework.Failf("Unable to upload kubectl binary to remote exec server. /uploads/ not in response. Response: %s", uploadBinaryName) } for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} { By("Running kubectl in netexec via an HTTP proxy using " + proxyVar) // start the proxy container - goproxyPodPath := filepath.Join(testContext.RepoRoot, "test/images/goproxy/pod.yaml") - runKubectlOrDie("create", "-f", goproxyPodPath, fmt.Sprintf("--namespace=%v", ns)) - checkPodsRunningReady(c, ns, []string{goproxyContainer}, podStartTimeout) + goproxyPodPath := filepath.Join(framework.TestContext.RepoRoot, "test/images/goproxy/pod.yaml") + framework.RunKubectlOrDie("create", "-f", goproxyPodPath, fmt.Sprintf("--namespace=%v", ns)) + framework.CheckPodsRunningReady(c, ns, []string{goproxyContainer}, framework.PodStartTimeout) // get the proxy address goproxyPod, err := c.Pods(ns).Get(goproxyContainer) if err != nil { - Failf("Unable to get the goproxy pod. Error: %s", err) + framework.Failf("Unable to get the goproxy pod. Error: %s", err) } proxyAddr := fmt.Sprintf("http://%s:8080", goproxyPod.Status.PodIP) shellCommand := fmt.Sprintf("%s=%s .%s --kubeconfig=%s --server=%s --namespace=%s exec nginx echo running in container", proxyVar, proxyAddr, uploadBinaryName, kubecConfigRemotePath, apiServer, ns) - Logf("About to remote exec: %v", shellCommand) + framework.Logf("About to remote exec: %v", shellCommand) // Execute kubectl on remote exec server. 
var netexecShellOutput []byte if subResourceProxyAvailable { @@ -426,78 +427,78 @@ var _ = KubeDescribe("Kubectl client", func() { Do().Raw() } if err != nil { - Failf("Unable to execute kubectl binary on the remote exec server due to error: %s", err) + framework.Failf("Unable to execute kubectl binary on the remote exec server due to error: %s", err) } var netexecOuput NetexecOutput if err := json.Unmarshal(netexecShellOutput, &netexecOuput); err != nil { - Failf("Unable to read the result from the netexec server. Error: %s", err) + framework.Failf("Unable to read the result from the netexec server. Error: %s", err) } // Get (and print!) the proxy logs here, so // they'll be present in case the below check // fails the test, to help diagnose #19500 if // it recurs. - proxyLog := runKubectlOrDie("log", "goproxy", fmt.Sprintf("--namespace=%v", ns)) + proxyLog := framework.RunKubectlOrDie("log", "goproxy", fmt.Sprintf("--namespace=%v", ns)) // Verify we got the normal output captured by the exec server expectedExecOutput := "running in container\n" if netexecOuput.Output != expectedExecOutput { - Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, netexecOuput.Output) + framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, netexecOuput.Output) } // Verify the proxy server logs saw the connection - expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimRight(strings.TrimLeft(testContext.Host, "https://"), "/api")) + expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimRight(strings.TrimLeft(framework.TestContext.Host, "https://"), "/api")) if !strings.Contains(proxyLog, expectedProxyLog) { - Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog) + framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog) } // Clean up the goproxyPod - cleanup(goproxyPodPath, ns, goproxyPodSelector) + framework.Cleanup(goproxyPodPath, ns, goproxyPodSelector) } }) It("should support inline execution and attach", func() { - SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c) nsFlag := fmt.Sprintf("--namespace=%v", ns) By("executing a command with run and attach with stdin") - runOutput := newKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). - withStdinData("abcd1234"). - execOrDie() + runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + WithStdinData("abcd1234"). + ExecOrDie() Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) Expect(c.Extensions().Jobs(ns).Delete("run-test", nil)).To(BeNil()) By("executing a command with run and attach without stdin") - runOutput = newKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). - withStdinData("abcd1234"). 
- execOrDie() + runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). + WithStdinData("abcd1234"). + ExecOrDie() Expect(runOutput).ToNot(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) Expect(c.Extensions().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) By("executing a command with run and attach with stdin with open stdin should remain running") - runOutput = newKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). - withStdinData("abcd1234\n"). - execOrDie() + runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=Never", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + WithStdinData("abcd1234\n"). + ExecOrDie() Expect(runOutput).ToNot(ContainSubstring("stdin closed")) runTestPod, _, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"})) if err != nil { os.Exit(1) } - if !checkPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { - Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") + if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { + framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - if !checkPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { - Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") + if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { + framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } - logOutput := runKubectlOrDie(nsFlag, "logs", runTestPod.Name) + logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) Expect(logOutput).ToNot(ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) @@ -517,79 +518,79 @@ var _ = KubeDescribe("Kubectl client", func() { By("curling local port output") localAddr := fmt.Sprintf("http://localhost:%d", cmd.port) body, err := curl(localAddr) - Logf("got: %s", body) + framework.Logf("got: %s", body) if err != nil { - Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err) + framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err) } if !strings.Contains(body, nginxDefaultOutput) { - Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body) + framework.Failf("Container port output missing expected value. 
Wanted:'%s', got: %s", nginxDefaultOutput, body) } }) }) - KubeDescribe("Kubectl api-versions", func() { + framework.KubeDescribe("Kubectl api-versions", func() { It("should check if v1 is in available api versions [Conformance]", func() { By("validating api verions") - output := runKubectlOrDie("api-versions") + output := framework.RunKubectlOrDie("api-versions") if !strings.Contains(output, "v1") { - Failf("No v1 in kubectl api-versions") + framework.Failf("No v1 in kubectl api-versions") } }) }) - KubeDescribe("Kubectl apply", func() { + framework.KubeDescribe("Kubectl apply", func() { It("should apply a new configuration to an existing RC", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook-go", file) } controllerJson := mkpath("redis-master-controller.json") nsFlag := fmt.Sprintf("--namespace=%v", ns) By("creating Redis RC") - runKubectlOrDie("create", "-f", controllerJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerJson, nsFlag) By("applying a modified configuration") stdin := modifyReplicationControllerConfiguration(controllerJson) - newKubectlCommand("apply", "-f", "-", nsFlag). - withStdinReader(stdin). - execOrDie() + framework.NewKubectlCommand("apply", "-f", "-", nsFlag). + WithStdinReader(stdin). + ExecOrDie() By("checking the result") forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration) }) }) - KubeDescribe("Kubectl cluster-info", func() { + framework.KubeDescribe("Kubectl cluster-info", func() { It("should check if Kubernetes master services is included in cluster-info [Conformance]", func() { By("validating cluster-info") - output := runKubectlOrDie("cluster-info") + output := framework.RunKubectlOrDie("cluster-info") // Can't check exact strings due to terminal control commands (colors) requiredItems := []string{"Kubernetes master", "is running at"} - if providerIs("gce", "gke") { + if framework.ProviderIs("gce", "gke") { requiredItems = append(requiredItems, "KubeDNS", "Heapster") } for _, item := range requiredItems { if !strings.Contains(output, item) { - Failf("Missing %s in kubectl cluster-info", item) + framework.Failf("Missing %s in kubectl cluster-info", item) } } }) }) - KubeDescribe("Kubectl describe", func() { + framework.KubeDescribe("Kubectl describe", func() { It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() { - SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) + framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook-go", file) } controllerJson := mkpath("redis-master-controller.json") serviceJson := mkpath("redis-master-service.json") nsFlag := fmt.Sprintf("--namespace=%v", ns) - runKubectlOrDie("create", "-f", controllerJson, nsFlag) - runKubectlOrDie("create", "-f", serviceJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", serviceJson, nsFlag) // Pod forEachPod(c, ns, "app", "redis", func(pod api.Pod) { - output := runKubectlOrDie("describe", "pod", pod.Name, nsFlag) + output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag) requiredStrings := [][]string{ {"Name:", "redis-master-"}, {"Namespace:", ns}, @@ 
-606,7 +607,7 @@ var _ = KubeDescribe("Kubectl client", func() { }) // Rc - output := runKubectlOrDie("describe", "rc", "redis-master", nsFlag) + output := framework.RunKubectlOrDie("describe", "rc", "redis-master", nsFlag) requiredStrings := [][]string{ {"Name:", "redis-master"}, {"Namespace:", ns}, @@ -624,7 +625,7 @@ var _ = KubeDescribe("Kubectl client", func() { checkOutput(output, requiredStrings) // Service - output = runKubectlOrDie("describe", "service", "redis-master", nsFlag) + output = framework.RunKubectlOrDie("describe", "service", "redis-master", nsFlag) requiredStrings = [][]string{ {"Name:", "redis-master"}, {"Namespace:", ns}, @@ -642,7 +643,7 @@ var _ = KubeDescribe("Kubectl client", func() { nodes, err := c.Nodes().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] - output = runKubectlOrDie("describe", "node", node.Name) + output = framework.RunKubectlOrDie("describe", "node", node.Name) requiredStrings = [][]string{ {"Name:", node.Name}, {"Labels:"}, @@ -661,7 +662,7 @@ var _ = KubeDescribe("Kubectl client", func() { checkOutput(output, requiredStrings) // Namespace - output = runKubectlOrDie("describe", "namespace", ns) + output = framework.RunKubectlOrDie("describe", "namespace", ns) requiredStrings = [][]string{ {"Name:", ns}, {"Labels:"}, @@ -672,10 +673,10 @@ var _ = KubeDescribe("Kubectl client", func() { }) }) - KubeDescribe("Kubectl expose", func() { + framework.KubeDescribe("Kubectl expose", func() { It("should create services for rc [Conformance]", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook-go", file) } controllerJson := mkpath("redis-master-controller.json") nsFlag := fmt.Sprintf("--namespace=%v", ns) @@ -683,24 +684,24 @@ var _ = KubeDescribe("Kubectl client", func() { redisPort := 6379 By("creating Redis RC") - runKubectlOrDie("create", "-f", controllerJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerJson, nsFlag) forEachPod(c, ns, "app", "redis", func(pod api.Pod) { - lookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", podStartTimeout) + framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout) }) validateService := func(name string, servicePort int, timeout time.Duration) { - err := wait.Poll(poll, timeout, func() (bool, error) { + err := wait.Poll(framework.Poll, timeout, func() (bool, error) { endpoints, err := c.Endpoints(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { err = nil } - Logf("Get endpoints failed (interval %v): %v", poll, err) + framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) return false, err } uidToPort := getContainerPortsByPodUID(endpoints) if len(uidToPort) == 0 { - Logf("No endpoint found, retrying") + framework.Logf("No endpoint found, retrying") return false, nil } if len(uidToPort) > 1 { @@ -708,7 +709,7 @@ var _ = KubeDescribe("Kubectl client", func() { } for _, port := range uidToPort { if port[0] != redisPort { - Failf("Wrong endpoint port: %d", port[0]) + framework.Failf("Wrong endpoint port: %d", port[0]) } } return true, nil @@ -719,41 +720,41 @@ var _ = KubeDescribe("Kubectl client", func() { Expect(err).NotTo(HaveOccurred()) if len(service.Spec.Ports) != 1 { - Failf("1 port is expected") + framework.Failf("1 port is expected") } port := service.Spec.Ports[0] if 
port.Port != servicePort {
- Failf("Wrong service port: %d", port.Port)
+ framework.Failf("Wrong service port: %d", port.Port)
}
if port.TargetPort.IntValue() != redisPort {
- Failf("Wrong target port: %d")
+ framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
}
}
By("exposing RC")
- runKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
- waitForService(c, ns, "rm2", true, poll, serviceStartTimeout)
- validateService("rm2", 1234, serviceStartTimeout)
+ framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
+ framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
+ validateService("rm2", 1234, framework.ServiceStartTimeout)
By("exposing service")
- runKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
- waitForService(c, ns, "rm3", true, poll, serviceStartTimeout)
- validateService("rm3", 2345, serviceStartTimeout)
+ framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
+ framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
+ validateService("rm3", 2345, framework.ServiceStartTimeout)
})
})
- KubeDescribe("Kubectl label", func() {
+ framework.KubeDescribe("Kubectl label", func() {
var podPath string
var nsFlag string
BeforeEach(func() {
- podPath = filepath.Join(testContext.RepoRoot, "docs/user-guide/pod.yaml")
+ podPath = filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/pod.yaml")
By("creating the pod")
nsFlag = fmt.Sprintf("--namespace=%v", ns)
- runKubectlOrDie("create", "-f", podPath, nsFlag)
- checkPodsRunningReady(c, ns, []string{simplePodName}, podStartTimeout)
+ framework.RunKubectlOrDie("create", "-f", podPath, nsFlag)
+ framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)
})
AfterEach(func() {
- cleanup(podPath, ns, simplePodSelector)
+ framework.Cleanup(podPath, ns, simplePodSelector)
})
It("should update the label on a resource [Conformance]", func() {
@@ -761,67 +762,67 @@ var _ = KubeDescribe("Kubectl client", func() {
labelValue := "testing-label-value"
By("adding the label " + labelName + " with value " + labelValue + " to a pod")
- runKubectlOrDie("label", "pods", simplePodName, labelName+"="+labelValue, nsFlag)
+ framework.RunKubectlOrDie("label", "pods", simplePodName, labelName+"="+labelValue, nsFlag)
By("verifying the pod has the label " + labelName + " with the value " + labelValue)
- output := runKubectlOrDie("get", "pod", simplePodName, "-L", labelName, nsFlag)
+ output := framework.RunKubectlOrDie("get", "pod", simplePodName, "-L", labelName, nsFlag)
if !strings.Contains(output, labelValue) {
- Failf("Failed updating label " + labelName + " to the pod " + simplePodName)
+ framework.Failf("Failed updating label " + labelName + " to the pod " + simplePodName)
}
By("removing the label " + labelName + " of a pod")
- runKubectlOrDie("label", "pods", simplePodName, labelName+"-", nsFlag)
+ framework.RunKubectlOrDie("label", "pods", simplePodName, labelName+"-", nsFlag)
By("verifying the pod doesn't have the label " + labelName)
- output = runKubectlOrDie("get", "pod", simplePodName, "-L", labelName, nsFlag)
+ output = framework.RunKubectlOrDie("get", "pod", simplePodName, "-L", labelName, nsFlag)
if strings.Contains(output, labelValue) {
-
Failf("Failed removing label " + labelName + " of the pod " + simplePodName) + framework.Failf("Failed removing label " + labelName + " of the pod " + simplePodName) } }) }) - KubeDescribe("Kubectl logs", func() { + framework.KubeDescribe("Kubectl logs", func() { var rcPath string var nsFlag string containerName := "redis-master" BeforeEach(func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook-go", file) } rcPath = mkpath("redis-master-controller.json") By("creating an rc") nsFlag = fmt.Sprintf("--namespace=%v", ns) - runKubectlOrDie("create", "-f", rcPath, nsFlag) + framework.RunKubectlOrDie("create", "-f", rcPath, nsFlag) }) AfterEach(func() { - cleanup(rcPath, ns, simplePodSelector) + framework.Cleanup(rcPath, ns, simplePodSelector) }) It("should be able to retrieve and filter logs [Conformance]", func() { - SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c) + framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c) forEachPod(c, ns, "app", "redis", func(pod api.Pod) { By("checking for a matching strings") - _, err := lookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", podStartTimeout) + _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout) Expect(err).NotTo(HaveOccurred()) By("limiting log lines") - out := runKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1") + out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1") Expect(len(out)).NotTo(BeZero()) Expect(len(strings.Split(out, "\n"))).To(Equal(1)) By("limiting log bytes") - out = runKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1") + out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1") Expect(len(strings.Split(out, "\n"))).To(Equal(1)) Expect(len(out)).To(Equal(1)) By("exposing timestamps") - out = runKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps") + out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps") lines := strings.Split(out, "\n") Expect(len(lines)).To(Equal(1)) words := strings.Split(lines[0], " ") Expect(len(words)).To(BeNumerically(">", 1)) if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil { - Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) + framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) } } @@ -830,27 +831,27 @@ var _ = KubeDescribe("Kubectl client", func() { // because the granularity is only 1 second and // it could end up rounding the wrong way. 
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s - recent_out := runKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") + recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") recent := len(strings.Split(recent_out, "\n")) - older_out := runKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") + older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") older := len(strings.Split(older_out, "\n")) Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out) }) }) }) - KubeDescribe("Kubectl patch", func() { + framework.KubeDescribe("Kubectl patch", func() { It("should add annotations for pods in rc [Conformance]", func() { mkpath := func(file string) string { - return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + return filepath.Join(framework.TestContext.RepoRoot, "examples/guestbook-go", file) } controllerJson := mkpath("redis-master-controller.json") nsFlag := fmt.Sprintf("--namespace=%v", ns) By("creating Redis RC") - runKubectlOrDie("create", "-f", controllerJson, nsFlag) + framework.RunKubectlOrDie("create", "-f", controllerJson, nsFlag) By("patching all pods") forEachPod(c, ns, "app", "redis", func(pod api.Pod) { - runKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") + framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) By("checking annotations") @@ -862,25 +863,25 @@ var _ = KubeDescribe("Kubectl client", func() { } } if !found { - Failf("Added annotation not found") + framework.Failf("Added annotation not found") } }) }) }) - KubeDescribe("Kubectl version", func() { + framework.KubeDescribe("Kubectl version", func() { It("should check is all data is printed [Conformance]", func() { - version := runKubectlOrDie("version") + version := framework.RunKubectlOrDie("version") requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} for _, item := range requiredItems { if !strings.Contains(version, item) { - Failf("Required item %s not found in %s", item, version) + framework.Failf("Required item %s not found in %s", item, version) } } }) }) - KubeDescribe("Kubectl run default", func() { + framework.KubeDescribe("Kubectl run default", func() { var nsFlag string var name string @@ -888,16 +889,16 @@ var _ = KubeDescribe("Kubectl client", func() { BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) - gte, err := serverVersionGTE(deploymentsVersion, c) + gte, err := framework.ServerVersionGTE(deploymentsVersion, c) if err != nil { - Failf("Failed to get server version: %v", err) + framework.Failf("Failed to get server version: %v", err) } if gte { name = "e2e-test-nginx-deployment" - cleanUp = func() { runKubectlOrDie("delete", "deployment", name, nsFlag) } + cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) } } else { name = "e2e-test-nginx-rc" - cleanUp = func() { runKubectlOrDie("delete", "rc", name, nsFlag) } + cleanUp = func() { framework.RunKubectlOrDie("delete", "rc", name, nsFlag) } } }) @@ -907,22 +908,22 @@ var _ = KubeDescribe("Kubectl client", func() { It("should create an rc or deployment from an image [Conformance]", func() { By("running the image " + nginxImage) - runKubectlOrDie("run", 
name, "--image="+nginxImage, nsFlag) + framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag) By("verifying the pod controlled by " + name + " gets created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) - podlist, err := waitForPodsWithLabel(c, ns, label) + podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { - Failf("Failed getting pod controlled by %s: %v", name, err) + framework.Failf("Failed getting pod controlled by %s: %v", name, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { - runKubectlOrDie("get", "pods", "-L", "run", nsFlag) - Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) } }) }) - KubeDescribe("Kubectl run rc", func() { + framework.KubeDescribe("Kubectl run rc", func() { var nsFlag string var rcName string @@ -932,32 +933,32 @@ var _ = KubeDescribe("Kubectl client", func() { }) AfterEach(func() { - runKubectlOrDie("delete", "rc", rcName, nsFlag) + framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) }) It("should create an rc from an image [Conformance]", func() { By("running the image " + nginxImage) - runKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) + framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) By("verifying the rc " + rcName + " was created") rc, err := c.ReplicationControllers(ns).Get(rcName) if err != nil { - Failf("Failed getting rc %s: %v", rcName, err) + framework.Failf("Failed getting rc %s: %v", rcName, err) } containers := rc.Spec.Template.Spec.Containers if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage { - Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) + framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) } By("verifying the pod controlled by rc " + rcName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) - podlist, err := waitForPodsWithLabel(c, ns, label) + podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { - Failf("Failed getting pod controlled by rc %s: %v", rcName, err) + framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { - runKubectlOrDie("get", "pods", "-L", "run", nsFlag) - Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.Failf("Failed creating 1 pod with expected image %s. 
Number of pods = %v", nginxImage, len(pods)) } By("confirm that you can get logs from an rc") @@ -965,18 +966,18 @@ var _ = KubeDescribe("Kubectl client", func() { for _, pod := range pods { podNames = append(podNames, pod.Name) } - if !checkPodsRunningReady(c, ns, podNames, podStartTimeout) { - Failf("Pods for rc %s were not ready", rcName) + if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { + framework.Failf("Pods for rc %s were not ready", rcName) } - _, err = runKubectl("logs", "rc/"+rcName, nsFlag) + _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) // a non-nil error is fine as long as we actually found a pod. if err != nil && !strings.Contains(err.Error(), " in pod ") { - Failf("Failed getting logs by rc %s: %v", rcName, err) + framework.Failf("Failed getting logs by rc %s: %v", rcName, err) } }) }) - KubeDescribe("Kubectl run deployment", func() { + framework.KubeDescribe("Kubectl run deployment", func() { var nsFlag string var dName string @@ -986,39 +987,39 @@ var _ = KubeDescribe("Kubectl client", func() { }) AfterEach(func() { - runKubectlOrDie("delete", "deployment", dName, nsFlag) + framework.RunKubectlOrDie("delete", "deployment", dName, nsFlag) }) It("should create a deployment from an image [Conformance]", func() { - SkipUnlessServerVersionGTE(deploymentsVersion, c) + framework.SkipUnlessServerVersionGTE(deploymentsVersion, c) By("running the image " + nginxImage) - runKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) + framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) By("verifying the deployment " + dName + " was created") d, err := c.Extensions().Deployments(ns).Get(dName) if err != nil { - Failf("Failed getting deployment %s: %v", dName, err) + framework.Failf("Failed getting deployment %s: %v", dName, err) } containers := d.Spec.Template.Spec.Containers if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage { - Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) + framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) } By("verifying the pod controlled by deployment " + dName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) - podlist, err := waitForPodsWithLabel(c, ns, label) + podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { - Failf("Failed getting pod controlled by deployment %s: %v", dName, err) + framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { - runKubectlOrDie("get", "pods", "-L", "run", nsFlag) - Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) + framework.Failf("Failed creating 1 pod with expected image %s. 
Number of pods = %v", nginxImage, len(pods))
}
})
})
- KubeDescribe("Kubectl run job", func() {
+ framework.KubeDescribe("Kubectl run job", func() {
var nsFlag string
var jobName string
@@ -1028,62 +1029,62 @@ var _ = KubeDescribe("Kubectl client", func() {
})
AfterEach(func() {
- runKubectlOrDie("delete", "jobs", jobName, nsFlag)
+ framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag)
})
It("should create a job from an image when restart is OnFailure [Conformance]", func() {
- SkipUnlessServerVersionGTE(jobsVersion, c)
+ framework.SkipUnlessServerVersionGTE(jobsVersion, c)
By("running the image " + nginxImage)
- runKubectlOrDie("run", jobName, "--restart=OnFailure", "--image="+nginxImage, nsFlag)
+ framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created")
job, err := c.Extensions().Jobs(ns).Get(jobName)
if err != nil {
- Failf("Failed getting job %s: %v", jobName, err)
+ framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
- Failf("Failed creating job %s for 1 pod with expected image %s", jobName, nginxImage)
+ framework.Failf("Failed creating job %s for 1 pod with expected image %s", jobName, nginxImage)
}
if job.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
- Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
+ framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
}
})
It("should create a job from an image when restart is Never [Conformance]", func() {
- SkipUnlessServerVersionGTE(jobsVersion, c)
+ framework.SkipUnlessServerVersionGTE(jobsVersion, c)
By("running the image " + nginxImage)
- runKubectlOrDie("run", jobName, "--restart=Never", "--image="+nginxImage, nsFlag)
+ framework.RunKubectlOrDie("run", jobName, "--restart=Never", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created")
job, err := c.Extensions().Jobs(ns).Get(jobName)
if err != nil {
- Failf("Failed getting job %s: %v", jobName, err)
+ framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
- Failf("Failed creating job %s for 1 pod with expected image %s", jobName, nginxImage)
+ framework.Failf("Failed creating job %s for 1 pod with expected image %s", jobName, nginxImage)
}
if job.Spec.Template.Spec.RestartPolicy != api.RestartPolicyNever {
- Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
+ framework.Failf("Failed creating a job with correct restart policy for --restart=Never")
}
})
})
- KubeDescribe("Kubectl run --rm job", func() {
+ framework.KubeDescribe("Kubectl run --rm job", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
jobName := "e2e-test-rm-busybox-job"
It("should create a job from an image, then delete the job [Conformance]", func() {
- SkipUnlessServerVersionGTE(jobsVersion, c)
+ framework.SkipUnlessServerVersionGTE(jobsVersion, c)
By("executing a command with run --rm and attach with stdin")
t := time.NewTimer(runJobTimeout)
defer t.Stop()
- runOutput := newKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
- withStdinData("abcd1234").
- withTimeout(t.C).
- execOrDie() + runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--restart=Never", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + WithStdinData("abcd1234"). + WithTimeout(t.C). + ExecOrDie() Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) @@ -1094,25 +1095,25 @@ var _ = KubeDescribe("Kubectl client", func() { }) }) - KubeDescribe("Proxy server", func() { + framework.KubeDescribe("Proxy server", func() { // TODO: test proxy options (static, prefix, etc) It("should support proxy with --port 0 [Conformance]", func() { By("starting the proxy server") port, cmd, err := startProxyServer() if cmd != nil { - defer tryKill(cmd) + defer framework.TryKill(cmd) } if err != nil { - Failf("Failed to start proxy server: %v", err) + framework.Failf("Failed to start proxy server: %v", err) } By("curling proxy /api/ output") localAddr := fmt.Sprintf("http://localhost:%d/api/", port) apiVersions, err := getAPIVersions(localAddr) if err != nil { - Failf("Expected at least one supported apiversion, got error %v", err) + framework.Failf("Expected at least one supported apiversion, got error %v", err) } if len(apiVersions.Versions) < 1 { - Failf("Expected at least one supported apiversion, got %v", apiVersions) + framework.Failf("Expected at least one supported apiversion, got %v", apiVersions) } }) @@ -1120,27 +1121,27 @@ var _ = KubeDescribe("Kubectl client", func() { By("Starting the proxy") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") if err != nil { - Failf("Failed to create temporary directory: %v", err) + framework.Failf("Failed to create temporary directory: %v", err) } path := filepath.Join(tmpdir, "test") defer os.Remove(path) defer os.Remove(tmpdir) - cmd := kubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) - stdout, stderr, err := startCmdAndStreamOutput(cmd) + cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) + stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { - Failf("Failed to start kubectl command: %v", err) + framework.Failf("Failed to start kubectl command: %v", err) } defer stdout.Close() defer stderr.Close() - defer tryKill(cmd) + defer framework.TryKill(cmd) buf := make([]byte, 128) if _, err = stdout.Read(buf); err != nil { - Failf("Expected output from kubectl proxy: %v", err) + framework.Failf("Expected output from kubectl proxy: %v", err) } By("retrieving proxy /api/ output") _, err = curlUnix("http://unused/api", path) if err != nil { - Failf("Failed get of /api at %s: %v", path, err) + framework.Failf("Failed get of /api at %s: %v", path, err) } }) }) @@ -1155,11 +1156,11 @@ func checkOutput(output string, required [][]string) { currentLine++ } if currentLine == len(outputLines) { - Failf("Failed to find %s in %s", requirement[0], output) + framework.Failf("Failed to find %s in %s", requirement[0], output) } for _, item := range requirement[1:] { if !strings.Contains(outputLines[currentLine], item) { - Failf("Failed to find %s in %s", item, outputLines[currentLine]) + framework.Failf("Failed to find %s in %s", item, outputLines[currentLine]) } } } @@ -1179,8 +1180,8 @@ func getAPIVersions(apiEndpoint string) (*unversioned.APIVersions, error) { func startProxyServer() (int, *exec.Cmd, error) { // Specifying port 0 indicates we want the os to pick a random port. 
- cmd := kubectlCmd("proxy", "-p", "0")
- stdout, stderr, err := startCmdAndStreamOutput(cmd)
+ cmd := framework.KubectlCmd("proxy", "-p", "0")
+ stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
return -1, nil, err
}
@@ -1230,19 +1231,19 @@ func curl(url string) (string, error) {
}
func validateGuestbookApp(c *client.Client, ns string) {
- Logf("Waiting for frontend to serve content.")
+ framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
- Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
+ framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
}
- Logf("Trying to add a new entry to the guestbook.")
+ framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
- Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
+ framework.Failf("Cannot add a new entry in %v seconds.", guestbookResponseTimeout.Seconds())
}
- Logf("Verifying that added entry can be retrieved.")
+ framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
- Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
+ framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
}
}
@@ -1253,13 +1254,13 @@ func waitForGuestbookResponse(c *client.Client, cmd, arg, expectedResponse strin
if err == nil && res == expectedResponse {
return true
}
- Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
+ framework.Logf("Failed to get response from guestbook.
err: %v, response: %s", err, res) } return false } func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (string, error) { - proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) if errProxy != nil { return "", errProxy } @@ -1283,12 +1284,12 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test" func readBytesFromFile(filename string) []byte { file, err := os.Open(filename) if err != nil { - Failf(err.Error()) + framework.Failf(err.Error()) } data, err := ioutil.ReadAll(file) if err != nil { - Failf(err.Error()) + framework.Failf(err.Error()) } return data @@ -1298,7 +1299,7 @@ func readReplicationControllerFromFile(filename string) *api.ReplicationControll data := readBytesFromFile(filename) rc := api.ReplicationController{} if err := yaml.Unmarshal(data, &rc); err != nil { - Failf(err.Error()) + framework.Failf(err.Error()) } return &rc @@ -1311,7 +1312,7 @@ func modifyReplicationControllerConfiguration(filename string) io.Reader { rc.Spec.Template.Labels[applyTestLabel] = "ADDED" data, err := json.Marshal(rc) if err != nil { - Failf("json marshal failed: %s\n", err) + framework.Failf("json marshal failed: %s\n", err) } return bytes.NewReader(data) @@ -1320,7 +1321,7 @@ func modifyReplicationControllerConfiguration(filename string) io.Reader { func forEachReplicationController(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) { var rcs *api.ReplicationControllerList var err error - for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { + for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := api.ListOptions{LabelSelector: label} rcs, err = c.ReplicationControllers(ns).List(options) @@ -1331,7 +1332,7 @@ func forEachReplicationController(c *client.Client, ns, selectorKey, selectorVal } if rcs == nil || len(rcs.Items) == 0 { - Failf("No replication controllers found") + framework.Failf("No replication controllers found") } for _, rc := range rcs.Items { @@ -1342,11 +1343,11 @@ func forEachReplicationController(c *client.Client, ns, selectorKey, selectorVal func validateReplicationControllerConfiguration(rc api.ReplicationController) { if rc.Name == "redis-master" { if _, ok := rc.Annotations[kubectl.LastAppliedConfigAnnotation]; !ok { - Failf("Annotation not found in modified configuration:\n%v\n", rc) + framework.Failf("Annotation not found in modified configuration:\n%v\n", rc) } if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" { - Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) + framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) } } } @@ -1358,8 +1359,8 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error // getUDData validates data.json in the update-demo (returns nil if data is ok). 
return func(c *client.Client, podID string) error { - Logf("validating pod %s", podID) - subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, c) + framework.Logf("validating pod %s", podID) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) if err != nil { return err } @@ -1386,12 +1387,12 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error if err != nil { return err } - Logf("got data: %s", body) + framework.Logf("got data: %s", body) var data updateDemoData if err := json.Unmarshal(body, &data); err != nil { return err } - Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected) + framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected) if strings.Contains(data.Image, jpgExpected) { return nil } else { @@ -1443,17 +1444,17 @@ func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.W // Set up the form file fileWriter, err := postBodyWriter.CreateFormFile("file", fileName) if err != nil { - Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err) + framework.Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err) } // Copy kubectl binary into the file writer if _, err := io.Copy(fileWriter, file); err != nil { - Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err) + framework.Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err) } // Nothing more should be written to this instance of the postBodyWriter if err := postBodyWriter.Close(); err != nil { - Failf("Unable to close the writer for file upload. Error: %s", err) + framework.Failf("Unable to close the writer for file upload. Error: %s", err) } } @@ -1471,7 +1472,7 @@ func findBinary(binName string, platform string) (string, error) { var binPath string for _, pre := range binPrefixes { - tryPath := path.Join(testContext.RepoRoot, pre, platform, binName) + tryPath := path.Join(framework.TestContext.RepoRoot, pre, platform, binName) fi, err := os.Stat(tryPath) if err != nil { continue diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go index 3b47cfdb408..d9da606ba2b 100644 --- a/test/e2e/kubelet.go +++ b/test/e2e/kubelet.go @@ -25,15 +25,16 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) const ( - // Interval to poll /runningpods on a node + // Interval to framework.Poll /runningpods on a node pollInterval = 1 * time.Second - // Interval to poll /stats/container on a node + // Interval to framework.Poll /stats/container on a node containerStatsPollingInterval = 5 * time.Second ) @@ -41,10 +42,10 @@ const ( // podNamePrefix and namespace. 
func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String { matches := sets.NewString() - Logf("Checking pods on node %v via /runningpods endpoint", nodeName) - runningPods, err := GetKubeletPods(c, nodeName) + framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName) + runningPods, err := framework.GetKubeletPods(c, nodeName) if err != nil { - Logf("Error checking running pods on %v: %v", nodeName, err) + framework.Logf("Error checking running pods on %v: %v", nodeName, err) return matches } for _, pod := range runningPods.Items { @@ -81,25 +82,25 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNam if seen.Len() == targetNumPods { return true, nil } - Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len()) + framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len()) return false, nil }) } -var _ = KubeDescribe("kubelet", func() { +var _ = framework.KubeDescribe("kubelet", func() { var numNodes int var nodeNames sets.String - framework := NewDefaultFramework("kubelet") - var resourceMonitor *resourceMonitor + f := framework.NewDefaultFramework("kubelet") + var resourceMonitor *framework.ResourceMonitor BeforeEach(func() { - nodes := ListSchedulableNodesOrDie(framework.Client) + nodes := framework.ListSchedulableNodesOrDie(f.Client) numNodes = len(nodes.Items) nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) } - resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingInterval) + resourceMonitor = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingInterval) resourceMonitor.Start() }) @@ -107,7 +108,7 @@ var _ = KubeDescribe("kubelet", func() { resourceMonitor.Stop() }) - KubeDescribe("Clean up pods on node", func() { + framework.KubeDescribe("Clean up pods on node", func() { type DeleteTest struct { podsPerNode int timeout time.Duration @@ -123,23 +124,23 @@ var _ = KubeDescribe("kubelet", func() { By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID())) - Expect(RunRC(RCConfig{ - Client: framework.Client, + Expect(framework.RunRC(framework.RCConfig{ + Client: f.Client, Name: rcName, - Namespace: framework.Namespace.Name, + Namespace: f.Namespace.Name, Image: "gcr.io/google_containers/pause:2.0", Replicas: totalPods, })).NotTo(HaveOccurred()) // Perform a sanity check so that we know all desired pods are // running on the nodes according to kubelet. The timeout is set to - // only 30 seconds here because RunRC already waited for all pods to + // only 30 seconds here because framework.RunRC already waited for all pods to // transition to the running status. - Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, totalPods, + Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods, time.Second*30)).NotTo(HaveOccurred()) resourceMonitor.LogLatest() By("Deleting the RC") - DeleteRC(framework.Client, framework.Namespace.Name, rcName) + framework.DeleteRC(f.Client, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its // cache) and returns a list of running pods. 
Some possible causes of @@ -148,9 +149,9 @@ var _ = KubeDescribe("kubelet", func() { // - a bug in graceful termination (if it is enabled) // - docker slow to delete pods (or resource problems causing slowness) start := time.Now() - Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, 0, + Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0, itArg.timeout)).NotTo(HaveOccurred()) - Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), + framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), time.Since(start)) resourceMonitor.LogCPUSummary() }) diff --git a/test/e2e/kubelet_etc_hosts.go b/test/e2e/kubelet_etc_hosts.go index 876c53c817f..677b90a7dfd 100644 --- a/test/e2e/kubelet_etc_hosts.go +++ b/test/e2e/kubelet_etc_hosts.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -37,11 +38,11 @@ const ( type KubeletManagedHostConfig struct { hostNetworkPod *api.Pod pod *api.Pod - f *Framework + f *framework.Framework } -var _ = KubeDescribe("KubeletManagedEtcHosts", func() { - f := NewDefaultFramework("e2e-kubelet-etc-hosts") +var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() { + f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts") config := &KubeletManagedHostConfig{ f: f, } @@ -94,12 +95,12 @@ func (config *KubeletManagedHostConfig) createPodWithHostNetwork() { func (config *KubeletManagedHostConfig) createPod(podSpec *api.Pod) *api.Pod { createdPod, err := config.getPodClient().Create(podSpec) if err != nil { - Failf("Failed to create %s pod: %v", podSpec.Name, err) + framework.Failf("Failed to create %s pod: %v", podSpec.Name, err) } - expectNoError(config.f.WaitForPodRunning(podSpec.Name)) + framework.ExpectNoError(config.f.WaitForPodRunning(podSpec.Name)) createdPod, err = config.getPodClient().Get(podSpec.Name) if err != nil { - Failf("Failed to retrieve %s pod: %v", podSpec.Name, err) + framework.Failf("Failed to retrieve %s pod: %v", podSpec.Name, err) } return createdPod } @@ -111,31 +112,31 @@ func (config *KubeletManagedHostConfig) getPodClient() client.PodInterface { func assertEtcHostsIsKubeletManaged(etcHostsContent string) { isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent) if !isKubeletManaged { - Failf("/etc/hosts file should be kubelet managed, but is not: %q", etcHostsContent) + framework.Failf("/etc/hosts file should be kubelet managed, but is not: %q", etcHostsContent) } } func assertEtcHostsIsNotKubeletManaged(etcHostsContent string) { isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent) if isKubeletManaged { - Failf("/etc/hosts file should not be kubelet managed, but is: %q", etcHostsContent) + framework.Failf("/etc/hosts file should not be kubelet managed, but is: %q", etcHostsContent) } } func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string { - cmd := kubectlCmd("exec", fmt.Sprintf("--namespace=%v", config.f.Namespace.Name), podName, "-c", containerName, "cat", "/etc/hosts") - stdout, stderr, err := startCmdAndStreamOutput(cmd) + cmd := framework.KubectlCmd("exec", fmt.Sprintf("--namespace=%v", config.f.Namespace.Name), podName, "-c", containerName, "cat", "/etc/hosts") + stdout, stderr, err := 
framework.StartCmdAndStreamOutput(cmd) if err != nil { - Failf("Failed to retrieve /etc/hosts, err: %q", err) + framework.Failf("Failed to retrieve /etc/hosts, err: %q", err) } defer stdout.Close() defer stderr.Close() buf := make([]byte, 1000) var n int - Logf("reading from `kubectl exec` command's stdout") + framework.Logf("reading from `kubectl exec` command's stdout") if n, err = stdout.Read(buf); err != nil { - Failf("Failed to read from kubectl exec stdout: %v", err) + framework.Failf("Failed to read from kubectl exec stdout: %v", err) } return string(buf[:n]) } diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go index 857afb46c8f..e689e4b6e4b 100644 --- a/test/e2e/kubelet_perf.go +++ b/test/e2e/kubelet_perf.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -41,33 +42,33 @@ const ( type resourceTest struct { podsPerNode int - cpuLimits containersCPUSummary - memLimits resourceUsagePerContainer + cpuLimits framework.ContainersCPUSummary + memLimits framework.ResourceUsagePerContainer } func logPodsOnNodes(c *client.Client, nodeNames []string) { for _, n := range nodeNames { - podList, err := GetKubeletRunningPods(c, n) + podList, err := framework.GetKubeletRunningPods(c, n) if err != nil { - Logf("Unable to retrieve kubelet pods for node %v", n) + framework.Logf("Unable to retrieve kubelet pods for node %v", n) continue } - Logf("%d pods are running on node %v", len(podList.Items), n) + framework.Logf("%d pods are running on node %v", len(podList.Items), n) } } -func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, rm *resourceMonitor, - expectedCPU map[string]map[float64]float64, expectedMemory resourceUsagePerContainer) { +func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor, + expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) { numNodes := nodeNames.Len() totalPods := podsPerNode * numNodes By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID())) // TODO: Use a more realistic workload - Expect(RunRC(RCConfig{ - Client: framework.Client, + Expect(framework.RunRC(framework.RCConfig{ + Client: f.Client, Name: rcName, - Namespace: framework.Namespace.Name, + Namespace: f.Namespace.Name, Image: "gcr.io/google_containers/pause:2.0", Replicas: totalPods, })).NotTo(HaveOccurred()) @@ -78,38 +79,38 @@ func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames se By("Start monitoring resource usage") // Periodically dump the cpu summary until the deadline is met. - // Note that without calling resourceMonitor.Reset(), the stats + // Note that without calling framework.ResourceMonitor.Reset(), the stats // would occupy increasingly more memory. This should be fine // for the current test duration, but we should reclaim the // entries if we plan to monitor longer (e.g., 8 hours). 
deadline := time.Now().Add(monitoringTime) for time.Now().Before(deadline) { timeLeft := deadline.Sub(time.Now()) - Logf("Still running...%v left", timeLeft) + framework.Logf("Still running...%v left", timeLeft) if timeLeft < reportingPeriod { time.Sleep(timeLeft) } else { time.Sleep(reportingPeriod) } - logPodsOnNodes(framework.Client, nodeNames.List()) + logPodsOnNodes(f.Client, nodeNames.List()) } By("Reporting overall resource usage") - logPodsOnNodes(framework.Client, nodeNames.List()) + logPodsOnNodes(f.Client, nodeNames.List()) usageSummary, err := rm.GetLatest() Expect(err).NotTo(HaveOccurred()) - Logf("%s", rm.FormatResourceUsage(usageSummary)) - verifyMemoryLimits(framework.Client, expectedMemory, usageSummary) + framework.Logf("%s", rm.FormatResourceUsage(usageSummary)) + verifyMemoryLimits(f.Client, expectedMemory, usageSummary) cpuSummary := rm.GetCPUSummary() - Logf("%s", rm.FormatCPUSummary(cpuSummary)) + framework.Logf("%s", rm.FormatCPUSummary(cpuSummary)) verifyCPULimits(expectedCPU, cpuSummary) By("Deleting the RC") - DeleteRC(framework.Client, framework.Namespace.Name, rcName) + framework.DeleteRC(f.Client, f.Namespace.Name, rcName) } -func verifyMemoryLimits(c *client.Client, expected resourceUsagePerContainer, actual resourceUsagePerNode) { +func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { if expected == nil { return } @@ -132,20 +133,20 @@ func verifyMemoryLimits(c *client.Client, expected resourceUsagePerContainer, ac } if len(nodeErrs) > 0 { errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", "))) - heapStats, err := getKubeletHeapStats(c, nodeName) + heapStats, err := framework.GetKubeletHeapStats(c, nodeName) if err != nil { - Logf("Unable to get heap stats from %q", nodeName) + framework.Logf("Unable to get heap stats from %q", nodeName) } else { - Logf("Heap stats on %q\n:%v", nodeName, heapStats) + framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats) } } } if len(errList) > 0 { - Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) + framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } -func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) { +func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) { if expected == nil { return } @@ -175,30 +176,30 @@ func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) { } } if len(errList) > 0 { - Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) + framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } // Slow by design (1 hour) -var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() { +var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() { var nodeNames sets.String - framework := NewDefaultFramework("kubelet-perf") - var rm *resourceMonitor + f := framework.NewDefaultFramework("kubelet-perf") + var rm *framework.ResourceMonitor BeforeEach(func() { - nodes := ListSchedulableNodesOrDie(framework.Client) + nodes := framework.ListSchedulableNodesOrDie(f.Client) nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) } - rm = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod) + rm = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingPeriod) rm.Start() }) AfterEach(func() { rm.Stop() }) - 
KubeDescribe("regular resource usage tracking", func() { + framework.KubeDescribe("regular resource usage tracking", func() { // We assume that the scheduler will make reasonable scheduling choices // and assign ~N pods on the node. // Although we want to track N pods per node, there are N + add-on pods @@ -210,27 +211,27 @@ var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() { rTests := []resourceTest{ { podsPerNode: 0, - cpuLimits: containersCPUSummary{ + cpuLimits: framework.ContainersCPUSummary{ stats.SystemContainerKubelet: {0.50: 0.06, 0.95: 0.08}, stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.06}, }, // We set the memory limits generously because the distribution // of the addon pods affect the memory usage on each node. - memLimits: resourceUsagePerContainer{ - stats.SystemContainerKubelet: &containerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024}, - stats.SystemContainerRuntime: &containerResourceUsage{MemoryRSSInBytes: 85 * 1024 * 1024}, + memLimits: framework.ResourceUsagePerContainer{ + stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024}, + stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 85 * 1024 * 1024}, }, }, { podsPerNode: 35, - cpuLimits: containersCPUSummary{ + cpuLimits: framework.ContainersCPUSummary{ stats.SystemContainerKubelet: {0.50: 0.12, 0.95: 0.14}, stats.SystemContainerRuntime: {0.50: 0.06, 0.95: 0.08}, }, // We set the memory limits generously because the distribution // of the addon pods affect the memory usage on each node. - memLimits: resourceUsagePerContainer{ - stats.SystemContainerRuntime: &containerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024}, + memLimits: framework.ResourceUsagePerContainer{ + stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024}, }, }, { @@ -244,18 +245,18 @@ var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() { name := fmt.Sprintf( "for %d pods per node over %v", podsPerNode, monitoringTime) It(name, func() { - runResourceTrackingTest(framework, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) + runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) }) } }) - KubeDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() { + framework.KubeDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() { density := []int{100} for i := range density { podsPerNode := density[i] name := fmt.Sprintf( "for %d pods per node over %v", podsPerNode, monitoringTime) It(name, func() { - runResourceTrackingTest(framework, podsPerNode, nodeNames, rm, nil, nil) + runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil) }) } }) diff --git a/test/e2e/kubeproxy.go b/test/e2e/kubeproxy.go index a5df91687a5..191104bc21e 100644 --- a/test/e2e/kubeproxy.go +++ b/test/e2e/kubeproxy.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/util/intstr" utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -63,15 +64,15 @@ type KubeProxyTestConfig struct { testContainerPod *api.Pod hostTestContainerPod *api.Pod endpointPods []*api.Pod - f *Framework + f *framework.Framework nodePortService *api.Service loadBalancerService *api.Service externalAddrs []string nodes []api.Node } -var _ = KubeDescribe("KubeProxy", func() { - f := NewDefaultFramework("e2e-kubeproxy") +var _ = framework.KubeDescribe("KubeProxy", func() { 
+ f := framework.NewDefaultFramework("e2e-kubeproxy") config := &KubeProxyTestConfig{ f: f, } @@ -238,7 +239,7 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ tries) By(fmt.Sprintf("Dialing from container. Running command:%s", cmd)) - stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) + stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) var output map[string][]string err := json.Unmarshal([]byte(stdout), &output) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) @@ -258,14 +259,14 @@ func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targe // hitting any other. forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; sleep %v; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd, hitEndpointRetryDelay) By(fmt.Sprintf("Dialing from node. command:%s", forLoop)) - stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop) + stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop) Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount)) } func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) { cmd := fmt.Sprintf("curl -s --connect-timeout 1 http://localhost:10249%s", path) By(fmt.Sprintf("Getting kube-proxy self URL %s", path)) - stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) + stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) Expect(strings.Contains(stdout, expected)).To(BeTrue()) } @@ -421,23 +422,23 @@ func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() { func (config *KubeProxyTestConfig) createTestPods() { testContainerPod := config.createTestPodSpec() - hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName) + hostTestContainerPod := framework.NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName) config.createPod(testContainerPod) config.createPod(hostTestContainerPod) - expectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) - expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) + framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) + framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) var err error config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name) if err != nil { - Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) + framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) } config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name) if err != nil { - Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) + framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) } } @@ -445,7 +446,7 @@ func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api. 
_, err := config.getServiceClient().Create(serviceSpec) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) - err = waitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second) + err = framework.WaitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) createdService, err := config.getServiceClient().Get(serviceSpec.Name) @@ -462,11 +463,11 @@ func (config *KubeProxyTestConfig) setup() { } By("Getting node addresses") - nodeList := ListSchedulableNodesOrDie(config.f.Client) - config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP) + nodeList := framework.ListSchedulableNodesOrDie(config.f.Client) + config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP) if len(config.externalAddrs) < 2 { // fall back to legacy IPs - config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP) + config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP) } Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP")) config.nodes = nodeList.Items @@ -500,7 +501,7 @@ func (config *KubeProxyTestConfig) cleanup() { } func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { - nodes := ListSchedulableNodesOrDie(config.f.Client) + nodes := framework.ListSchedulableNodesOrDie(config.f.Client) // create pods, one for each node createdPods := make([]*api.Pod, 0, len(nodes.Items)) @@ -515,9 +516,9 @@ func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector m // wait that all of them are up runningPods := make([]*api.Pod, 0, len(nodes.Items)) for _, p := range createdPods { - expectNoError(config.f.WaitForPodReady(p.Name)) + framework.ExpectNoError(config.f.WaitForPodReady(p.Name)) rp, err := config.getPodClient().Get(p.Name) - expectNoError(err) + framework.ExpectNoError(err) runningPods = append(runningPods, rp) } @@ -529,14 +530,14 @@ func (config *KubeProxyTestConfig) deleteNetProxyPod() { config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.endpointPods = config.endpointPods[1:] // wait for pod being deleted. - err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) + err := framework.WaitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) if err != nil { - Failf("Failed to delete %s pod: %v", pod.Name, err) + framework.Failf("Failed to delete %s pod: %v", pod.Name, err) } // wait for endpoint being removed. - err = waitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, wait.ForeverTestTimeout) + err = framework.WaitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, wait.ForeverTestTimeout) if err != nil { - Failf("Failed to remove endpoint from service: %s", nodePortServiceName) + framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName) } // wait for kube-proxy to catch up with the pod being deleted. 
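The `sort | uniq -c | wc -l` pipeline that dialFromNode runs earlier in this file is simply counting how many distinct endpoints answered across the retries. A standalone sketch of the same idea, not part of the patch, with a hypothetical dial function and made-up endpoint names standing in for the curl command and the real backend pods:

    package main

    import "fmt"

    // countDistinct mirrors the shell pipeline used in dialFromNode: dial
    // `tries` times and count how many distinct, non-empty responses came back.
    func countDistinct(dial func() string, tries int) int {
    	seen := map[string]bool{}
    	for i := 0; i < tries; i++ {
    		if resp := dial(); resp != "" {
    			seen[resp] = true
    		}
    	}
    	return len(seen)
    }

    func main() {
    	// Canned responses simulating two healthy endpoints plus one failed dial.
    	responses := []string{"netserver-0", "netserver-1", "netserver-0", ""}
    	i := 0
    	dial := func() string { r := responses[i%len(responses)]; i++; return r }
    	fmt.Println(countDistinct(dial, 8)) // 2
    }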
time.Sleep(5 * time.Second) @@ -545,7 +546,7 @@ func (config *KubeProxyTestConfig) deleteNetProxyPod() { func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod { createdPod, err := config.getPodClient().Create(pod) if err != nil { - Failf("Failed to create %s pod: %v", pod.Name, err) + framework.Failf("Failed to create %s pod: %v", pod.Name, err) } return createdPod } diff --git a/test/e2e/limit_range.go b/test/e2e/limit_range.go index e576e28e593..79d4734e142 100644 --- a/test/e2e/limit_range.go +++ b/test/e2e/limit_range.go @@ -21,13 +21,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("LimitRange", func() { - f := NewDefaultFramework("limitrange") +var _ = framework.KubeDescribe("LimitRange", func() { + f := framework.NewDefaultFramework("limitrange") It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() { By("Creating a LimitRange") @@ -63,7 +64,7 @@ var _ = KubeDescribe("LimitRange", func() { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) if err != nil { // Print the pod to help in debugging. - Logf("Pod %+v does not have the expected requirements", pod) + framework.Logf("Pod %+v does not have the expected requirements", pod) Expect(err).NotTo(HaveOccurred()) } } @@ -84,7 +85,7 @@ var _ = KubeDescribe("LimitRange", func() { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) if err != nil { // Print the pod to help in debugging. - Logf("Pod %+v does not have the expected requirements", pod) + framework.Logf("Pod %+v does not have the expected requirements", pod) Expect(err).NotTo(HaveOccurred()) } } @@ -103,12 +104,12 @@ var _ = KubeDescribe("LimitRange", func() { }) func equalResourceRequirement(expected api.ResourceRequirements, actual api.ResourceRequirements) error { - Logf("Verifying requests: expected %s with actual %s", expected.Requests, actual.Requests) + framework.Logf("Verifying requests: expected %s with actual %s", expected.Requests, actual.Requests) err := equalResourceList(expected.Requests, actual.Requests) if err != nil { return err } - Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits) + framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits) err = equalResourceList(expected.Limits, actual.Limits) if err != nil { return err diff --git a/test/e2e/load.go b/test/e2e/load.go index 1f7d37f9c7e..01bb00b2084 100644 --- a/test/e2e/load.go +++ b/test/e2e/load.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -47,45 +48,45 @@ const ( // the ginkgo.skip list (see driver.go). // To run this suite you must explicitly ask for it by setting the // -t/--test flag or ginkgo.focus flag. 
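The limit_range.go helpers above boil down to a field-by-field comparison of expected versus observed resource lists. A simplified stand-in, not part of the patch, with plain strings in place of api.ResourceList and resource.Quantity:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    // equalResourceList is a simplified sketch of the comparison the test makes
    // for Requests and then Limits: any mismatch is reported as an error so the
    // caller can log the offending pod before failing.
    func equalResourceList(expected, actual map[string]string) error {
    	if !reflect.DeepEqual(expected, actual) {
    		return fmt.Errorf("resource lists differ: expected %v, got %v", expected, actual)
    	}
    	return nil
    }

    func main() {
    	expected := map[string]string{"cpu": "100m", "memory": "200Mi"}
    	actual := map[string]string{"cpu": "100m", "memory": "200Mi"}
    	fmt.Println(equalResourceList(expected, actual)) // <nil>
    }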
-var _ = KubeDescribe("Load capacity", func() { +var _ = framework.KubeDescribe("Load capacity", func() { var c *client.Client var nodeCount int var ns string - var configs []*RCConfig + var configs []*framework.RCConfig // Gathers metrics before teardown // TODO add flag that allows to skip cleanup on failure AfterEach(func() { // Verify latency metrics - highLatencyRequests, err := HighLatencyRequests(c) - expectNoError(err, "Too many instances metrics above the threshold") + highLatencyRequests, err := framework.HighLatencyRequests(c) + framework.ExpectNoError(err, "Too many instances metrics above the threshold") Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) }) // Explicitly put here, to delete namespace at the end of the test // (after measuring latency metrics, etc.). - options := FrameworkOptions{ - clientQPS: 50, - clientBurst: 100, + options := framework.FrameworkOptions{ + ClientQPS: 50, + ClientBurst: 100, } - framework := NewFramework("load", options) - framework.NamespaceDeletionTimeout = time.Hour + f := framework.NewFramework("load", options) + f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { - c = framework.Client + c = f.Client - ns = framework.Namespace.Name - nodes := ListSchedulableNodesOrDie(c) + ns = f.Namespace.Name + nodes := framework.ListSchedulableNodesOrDie(c) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) // Terminating a namespace (deleting the remaining objects from it - which // generally means events) can affect the current run. Thus we wait for all // terminating namespace to be finally deleted before starting this test. - err := checkTestingNSDeletedExcept(c, ns) - expectNoError(err) + err := framework.CheckTestingNSDeletedExcept(c, ns) + framework.ExpectNoError(err) - expectNoError(resetMetrics(c)) + framework.ExpectNoError(framework.ResetMetrics(c)) }) type Load struct { @@ -166,8 +167,8 @@ func computeRCCounts(total int) (int, int, int) { return smallRCCount, mediumRCCount, bigRCCount } -func generateRCConfigs(totalPods int, image string, command []string, c *client.Client, ns string) []*RCConfig { - configs := make([]*RCConfig, 0) +func generateRCConfigs(totalPods int, image string, command []string, c *client.Client, ns string) []*framework.RCConfig { + configs := make([]*framework.RCConfig, 0) smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods) configs = append(configs, generateRCConfigsForGroup(c, ns, smallRCGroupName, smallRCSize, smallRCCount, image, command)...) @@ -177,10 +178,10 @@ func generateRCConfigs(totalPods int, image string, command []string, c *client. 
return configs } -func generateRCConfigsForGroup(c *client.Client, ns, groupName string, size, count int, image string, command []string) []*RCConfig { - configs := make([]*RCConfig, 0, count) +func generateRCConfigsForGroup(c *client.Client, ns, groupName string, size, count int, image string, command []string) []*framework.RCConfig { + configs := make([]*framework.RCConfig, 0, count) for i := 1; i <= count; i++ { - config := &RCConfig{ + config := &framework.RCConfig{ Client: c, Name: groupName + "-" + strconv.Itoa(i), Namespace: ns, @@ -200,7 +201,7 @@ func sleepUpTo(d time.Duration) { time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds()))) } -func createAllRC(configs []*RCConfig, creatingTime time.Duration) { +func createAllRC(configs []*framework.RCConfig, creatingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -209,15 +210,15 @@ func createAllRC(configs []*RCConfig, creatingTime time.Duration) { wg.Wait() } -func createRC(wg *sync.WaitGroup, config *RCConfig, creatingTime time.Duration) { +func createRC(wg *sync.WaitGroup, config *framework.RCConfig, creatingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(creatingTime) - expectNoError(RunRC(*config), fmt.Sprintf("creating rc %s", config.Name)) + framework.ExpectNoError(framework.RunRC(*config), fmt.Sprintf("creating rc %s", config.Name)) } -func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) { +func scaleAllRC(configs []*framework.RCConfig, scalingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -228,13 +229,13 @@ func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) { // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards. // Scaling happens always based on original size, not the current size. 
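The scaling target computed in scaleRC is worth spelling out: rand.Intn(replicas) is uniform over [0, replicas), so adding replicas/2 lands in [0.5*replicas, 1.5*replicas), always relative to the original size rather than the current one. A runnable sketch, not part of the patch:

    package main

    import (
    	"fmt"
    	"math/rand"
    )

    // newSize reproduces the arithmetic used in scaleRC: with replicas = 10 the
    // result is always in [5, 15).
    func newSize(replicas int) uint {
    	return uint(rand.Intn(replicas) + replicas/2)
    }

    func main() {
    	for i := 0; i < 3; i++ {
    		fmt.Println(newSize(10))
    	}
    }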
-func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) { +func scaleRC(wg *sync.WaitGroup, config *framework.RCConfig, scalingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(scalingTime) newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2) - expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true), + framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true), fmt.Sprintf("scaling rc %s for the first time", config.Name)) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) options := api.ListOptions{ @@ -242,10 +243,10 @@ func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) { ResourceVersion: "0", } _, err := config.Client.Pods(config.Namespace).List(options) - expectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name)) + framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name)) } -func deleteAllRC(configs []*RCConfig, deletingTime time.Duration) { +func deleteAllRC(configs []*framework.RCConfig, deletingTime time.Duration) { var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -254,10 +255,10 @@ func deleteAllRC(configs []*RCConfig, deletingTime time.Duration) { wg.Wait() } -func deleteRC(wg *sync.WaitGroup, config *RCConfig, deletingTime time.Duration) { +func deleteRC(wg *sync.WaitGroup, config *framework.RCConfig, deletingTime time.Duration) { defer GinkgoRecover() defer wg.Done() sleepUpTo(deletingTime) - expectNoError(DeleteRC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) + framework.ExpectNoError(framework.DeleteRC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) } diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go index f15c406c00c..1862840a4cb 100644 --- a/test/e2e/mesos.go +++ b/test/e2e/mesos.go @@ -24,30 +24,31 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("Mesos", func() { - framework := NewDefaultFramework("pods") +var _ = framework.KubeDescribe("Mesos", func() { + f := framework.NewDefaultFramework("pods") var c *client.Client var ns string BeforeEach(func() { - SkipUnlessProviderIs("mesos/docker") - c = framework.Client - ns = framework.Namespace.Name + framework.SkipUnlessProviderIs("mesos/docker") + c = f.Client + ns = f.Namespace.Name }) It("applies slave attributes as labels", func() { - nodeClient := framework.Client.Nodes() + nodeClient := f.Client.Nodes() rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) options := api.ListOptions{LabelSelector: rackA} nodes, err := nodeClient.List(options) if err != nil { - Failf("Failed to query for node: %v", err) + framework.Failf("Failed to query for node: %v", err) } Expect(len(nodes.Items)).To(Equal(1)) @@ -61,14 +62,14 @@ var _ = KubeDescribe("Mesos", func() { }) It("starts static pods on every node in the mesos cluster", func() { - client := framework.Client - expectNoError(allNodesReady(client, wait.ForeverTestTimeout), "all nodes ready") + client := f.Client + framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready") - nodelist := ListSchedulableNodesOrDie(framework.Client) + nodelist := framework.ListSchedulableNodesOrDie(f.Client) const ns = "static-pods" numpods := len(nodelist.Items) - expectNoError(waitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout), + framework.ExpectNoError(framework.WaitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout), fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods)) }) @@ -98,13 +99,13 @@ var _ = KubeDescribe("Mesos", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) - expectNoError(waitForPodRunningInNamespace(c, podName, ns)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) pod, err := c.Pods(ns).Get(podName) - expectNoError(err) + framework.ExpectNoError(err) - nodeClient := framework.Client.Nodes() + nodeClient := f.Client.Nodes() // schedule onto node with rack=2 being assigned to the "public" role rack2 := labels.SelectorFromSet(map[string]string{ @@ -112,7 +113,7 @@ var _ = KubeDescribe("Mesos", func() { }) options := api.ListOptions{LabelSelector: rack2} nodes, err := nodeClient.List(options) - expectNoError(err) + framework.ExpectNoError(err) Expect(nodes.Items[0].Name).To(Equal(pod.Spec.NodeName)) }) diff --git a/test/e2e/metrics_grabber_test.go b/test/e2e/metrics_grabber_test.go index 4d9d3826b6e..181bd362683 100644 --- a/test/e2e/metrics_grabber_test.go +++ b/test/e2e/metrics_grabber_test.go @@ -23,6 +23,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -77,23 +78,23 @@ func checkMetrics(response metrics.Metrics, assumedMetrics map[string][]string) Expect(invalidLabels).To(BeEmpty()) } -var _ = KubeDescribe("MetricsGrabber", func() { - framework := NewDefaultFramework("metrics-grabber") +var _ = framework.KubeDescribe("MetricsGrabber", func() { + f := framework.NewDefaultFramework("metrics-grabber") var c *client.Client var grabber *metrics.MetricsGrabber BeforeEach(func() { var err error - c = framework.Client - expectNoError(err) + c = f.Client + framework.ExpectNoError(err) grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true) - expectNoError(err) + framework.ExpectNoError(err) }) It("should grab all metrics from API server.", func() { By("Connecting to /metrics endpoint") unknownMetrics := sets.NewString() response, err := grabber.GrabFromApiServer(unknownMetrics) - expectNoError(err) + framework.ExpectNoError(err) Expect(unknownMetrics).To(BeEmpty()) checkMetrics(metrics.Metrics(response), metrics.KnownApiServerMetrics) @@ -101,10 +102,10 @@ var _ = KubeDescribe("MetricsGrabber", func() { It("should grab all metrics from a Kubelet.", func() { By("Proxying to Node through the API server") - nodes := ListSchedulableNodesOrDie(c) + nodes := framework.ListSchedulableNodesOrDie(c) Expect(nodes.Items).NotTo(BeEmpty()) response, err := grabber.GrabFromKubelet(nodes.Items[0].Name) - expectNoError(err) + framework.ExpectNoError(err) checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics) }) @@ -112,7 +113,7 @@ var _ = KubeDescribe("MetricsGrabber", func() { By("Proxying to Pod through the API server") // Check if master Node is registered nodes, err := c.Nodes().List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) var masterRegistered = false for _, node := range nodes.Items { @@ -121,12 +122,12 @@ var _ = KubeDescribe("MetricsGrabber", func() { } } if !masterRegistered { - Logf("Master is node registered. Skipping testing Scheduler metrics.") + framework.Logf("Master is node registered. Skipping testing Scheduler metrics.") return } unknownMetrics := sets.NewString() response, err := grabber.GrabFromScheduler(unknownMetrics) - expectNoError(err) + framework.ExpectNoError(err) Expect(unknownMetrics).To(BeEmpty()) checkMetrics(metrics.Metrics(response), metrics.KnownSchedulerMetrics) @@ -136,7 +137,7 @@ var _ = KubeDescribe("MetricsGrabber", func() { By("Proxying to Pod through the API server") // Check if master Node is registered nodes, err := c.Nodes().List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) var masterRegistered = false for _, node := range nodes.Items { @@ -145,12 +146,12 @@ var _ = KubeDescribe("MetricsGrabber", func() { } } if !masterRegistered { - Logf("Master is node registered. Skipping testing ControllerManager metrics.") + framework.Logf("Master is node registered. 
Skipping testing ControllerManager metrics.") return } unknownMetrics := sets.NewString() response, err := grabber.GrabFromControllerManager(unknownMetrics) - expectNoError(err) + framework.ExpectNoError(err) Expect(unknownMetrics).To(BeEmpty()) checkMetrics(metrics.Metrics(response), metrics.KnownControllerManagerMetrics) diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go index e383fc9c85a..063999db96c 100644 --- a/test/e2e/monitoring.go +++ b/test/e2e/monitoring.go @@ -26,15 +26,16 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) -var _ = KubeDescribe("Monitoring", func() { - f := NewDefaultFramework("monitoring") +var _ = framework.KubeDescribe("Monitoring", func() { + f := framework.NewDefaultFramework("monitoring") BeforeEach(func() { - SkipUnlessProviderIs("gce") + framework.SkipUnlessProviderIs("gce") }) It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() { @@ -190,7 +191,7 @@ func getInfluxdbData(c *client.Client, query string, tag string) (map[string]boo return nil, fmt.Errorf("expected exactly one series for query %q.", query) } if len(response.Results[0].Series[0].Columns) != 1 { - Failf("Expected one column for query %q. Found %v", query, response.Results[0].Series[0].Columns) + framework.Failf("Expected one column for query %q. Found %v", query, response.Results[0].Series[0].Columns) } result := map[string]bool{} for _, value := range response.Results[0].Series[0].Values { @@ -216,20 +217,20 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string pods, err := getInfluxdbData(c, podlistQuery, "pod_id") if err != nil { // We don't fail the test here because the influxdb service might still not be running. - Logf("failed to query list of pods from influxdb. Query: %q, Err: %v", podlistQuery, err) + framework.Logf("failed to query list of pods from influxdb. Query: %q, Err: %v", podlistQuery, err) return false } nodes, err := getInfluxdbData(c, nodelistQuery, "hostname") if err != nil { - Logf("failed to query list of nodes from influxdb. Query: %q, Err: %v", nodelistQuery, err) + framework.Logf("failed to query list of nodes from influxdb. Query: %q, Err: %v", nodelistQuery, err) return false } if !expectedItemsExist(expectedPods, pods) { - Logf("failed to find all expected Pods.\nExpected: %v\nActual: %v", expectedPods, pods) + framework.Logf("failed to find all expected Pods.\nExpected: %v\nActual: %v", expectedPods, pods) return false } if !expectedItemsExist(expectedNodes, nodes) { - Logf("failed to find all expected Nodes.\nExpected: %v\nActual: %v", expectedNodes, nodes) + framework.Logf("failed to find all expected Nodes.\nExpected: %v\nActual: %v", expectedNodes, nodes) return false } return true @@ -238,12 +239,12 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { // Check if heapster pods and services are up. expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c) - expectNoError(err) - expectNoError(expectedServicesExist(c)) + framework.ExpectNoError(err) + framework.ExpectNoError(expectedServicesExist(c)) // TODO: Wait for all pods and services to be running. 
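validatePodsAndNodes treats the influxdb query results as a set of tag values and checks that every expected pod and node name is present. A standalone sketch of that membership check, not part of the patch; the real expectedItemsExist helper presumably has a similar shape, taking the map[string]bool returned by getInfluxdbData:

    package main

    import "fmt"

    // expectedItemsExist reports whether every expected name appears in the set
    // of tag values returned by the influxdb query.
    func expectedItemsExist(expected []string, actual map[string]bool) bool {
    	for _, item := range expected {
    		if !actual[item] {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	nodes := map[string]bool{"node-1": true, "node-2": true}
    	fmt.Println(expectedItemsExist([]string{"node-1", "node-2"}, nodes)) // true
    	fmt.Println(expectedItemsExist([]string{"node-3"}, nodes))          // false
    }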
expectedNodes, err := getAllNodesInCluster(c) - expectNoError(err) + framework.ExpectNoError(err) startTime := time.Now() for { if validatePodsAndNodes(c, expectedPods, expectedNodes) { @@ -256,7 +257,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { } time.Sleep(sleepBetweenAttempts) } - Failf("monitoring using heapster and influxdb test failed") + framework.Failf("monitoring using heapster and influxdb test failed") } func printDebugInfo(c *client.Client) { @@ -264,10 +265,10 @@ func printDebugInfo(c *client.Client) { options := api.ListOptions{LabelSelector: set.AsSelector()} podList, err := c.Pods(api.NamespaceSystem).List(options) if err != nil { - Logf("Error while listing pods %v", err) + framework.Logf("Error while listing pods %v", err) return } for _, pod := range podList.Items { - Logf("Kubectl output:\n%v", runKubectlOrDie("log", pod.Name, "--namespace=kube-system")) + framework.Logf("Kubectl output:\n%v", framework.RunKubectlOrDie("log", pod.Name, "--namespace=kube-system")) } } diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go index 4a6df5bf1d0..bdb4c2225a5 100644 --- a/test/e2e/namespace.go +++ b/test/e2e/namespace.go @@ -26,12 +26,13 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { +func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { var err error By("Creating testing namespaces") @@ -50,13 +51,13 @@ func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds in //Wait 10 seconds, then SEND delete requests for all the namespaces. By("Waiting 10 seconds") time.Sleep(time.Duration(10 * time.Second)) - deleted, err := deleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */) + deleted, err := framework.DeleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */) Expect(err).NotTo(HaveOccurred()) Expect(len(deleted)).To(Equal(totalNS)) By("Waiting for namespaces to vanish") //Now POLL until all namespaces have been eradicated. 
- expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, + framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 nsList, err := f.Client.Namespaces().List(api.ListOptions{}) @@ -69,14 +70,14 @@ func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds in } } if cnt > maxAllowedAfterDel { - Logf("Remaining namespaces : %v", cnt) + framework.Logf("Remaining namespaces : %v", cnt) return false, nil } return true, nil })) } -func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { +func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { var err error By("Creating a test namespace") @@ -84,7 +85,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { Expect(err).NotTo(HaveOccurred()) By("Waiting for a default service account to be provisioned in namespace") - err = waitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) Expect(err).NotTo(HaveOccurred()) By("Creating a pod in the namespace") @@ -105,7 +106,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { Expect(err).NotTo(HaveOccurred()) By("Waiting for the pod to have running status") - expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, pod.Namespace)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, pod.Namespace)) By("Deleting the namespace") err = f.Client.Namespaces().Delete(namespace.Name) @@ -113,7 +114,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds - expectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, + framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.Client.Namespaces().Get(namespace.Name) if err != nil && errors.IsNotFound(err) { @@ -127,7 +128,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { Expect(err).To(HaveOccurred()) } -func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) { +func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { var err error By("Creating a test namespace") @@ -135,7 +136,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) { Expect(err).NotTo(HaveOccurred()) By("Waiting for a default service account to be provisioned in namespace") - err = waitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) Expect(err).NotTo(HaveOccurred()) By("Creating a service in the namespace") @@ -165,7 +166,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) { By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) - expectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, + framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.Client.Namespaces().Get(namespace.Name) if err != nil && errors.IsNotFound(err) { @@ -207,9 +208,9 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) { // that each have a variable amount of content in the associated Namespace. // When run in [Serial] this test appears to delete Namespace objects at a // rate of approximately 1 per second. 
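The wait.Poll in extinguish succeeds once the number of leftover test namespaces drops to maxAllowedAfterDel or below. A standalone sketch of that counting condition, not part of the patch, assuming the namespaces are matched by the same "nslifetest" prefix used in the DeleteNamespaces call:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // countRemaining counts namespaces that still carry the test prefix and
    // reports whether the poll condition is satisfied.
    func countRemaining(names []string, maxAllowedAfterDel int) (done bool, remaining int) {
    	for _, name := range names {
    		if strings.HasPrefix(name, "nslifetest") {
    			remaining++
    		}
    	}
    	return remaining <= maxAllowedAfterDel, remaining
    }

    func main() {
    	names := []string{"default", "nslifetest-1", "nslifetest-2"}
    	done, n := countRemaining(names, 0)
    	fmt.Println(done, n) // false 2
    }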
-var _ = KubeDescribe("Namespaces [Serial]", func() { +var _ = framework.KubeDescribe("Namespaces [Serial]", func() { - f := NewDefaultFramework("namespaces") + f := framework.NewDefaultFramework("namespaces") It("should ensure that all pods are removed when a namespace is deleted.", func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) }) diff --git a/test/e2e/networking.go b/test/e2e/networking.go index 95a31d3bdbc..914d62fcb2b 100644 --- a/test/e2e/networking.go +++ b/test/e2e/networking.go @@ -24,13 +24,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("Networking", func() { - f := NewDefaultFramework("nettest") +var _ = framework.KubeDescribe("Networking", func() { + f := framework.NewDefaultFramework("nettest") var svcname = "nettest" @@ -41,16 +42,16 @@ var _ = KubeDescribe("Networking", func() { By("Executing a successful http request from the external internet") resp, err := http.Get("http://google.com") if err != nil { - Failf("Unable to connect/talk to the internet: %v", err) + framework.Failf("Unable to connect/talk to the internet: %v", err) } if resp.StatusCode != http.StatusOK { - Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) + framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) } }) It("should provide Internet connection for containers [Conformance]", func() { By("Running container which tries to wget google.com") - expectNoError(CheckConnectivityToHost(f, "", "wget-test", "google.com")) + framework.ExpectNoError(framework.CheckConnectivityToHost(f, "", "wget-test", "google.com")) }) // First test because it has no dependencies on variables created later on. @@ -69,7 +70,7 @@ var _ = KubeDescribe("Networking", func() { AbsPath(test.path). DoRaw() if err != nil { - Failf("Failed: %v\nBody: %s", err, string(data)) + framework.Failf("Failed: %v\nBody: %s", err, string(data)) } } }) @@ -97,30 +98,30 @@ var _ = KubeDescribe("Networking", func() { }, }) if err != nil { - Failf("unable to create test service named [%s] %v", svc.Name, err) + framework.Failf("unable to create test service named [%s] %v", svc.Name, err) } // Clean up service defer func() { By("Cleaning up the service") if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil { - Failf("unable to delete svc %v: %v", svc.Name, err) + framework.Failf("unable to delete svc %v: %v", svc.Name, err) } }() By("Creating a webserver (pending) pod on each node") - nodes, err := GetReadyNodes(f) - expectNoError(err) + nodes, err := framework.GetReadyNodes(f) + framework.ExpectNoError(err) if len(nodes.Items) == 1 { // in general, the test requires two nodes. But for local development, often a one node cluster // is created, for simplicity and speed. (see issue #10012). We permit one-node test // only in some cases - if !providerIs("local") { - Failf(fmt.Sprintf("The test requires two Ready nodes on %s, but found just one.", testContext.Provider)) + if !framework.ProviderIs("local") { + framework.Failf(fmt.Sprintf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)) } - Logf("Only one ready node is detected. The test has limited scope in such setting. " + + framework.Logf("Only one ready node is detected. The test has limited scope in such setting. 
" + "Rerun it with at least two nodes to get complete coverage.") } @@ -131,7 +132,7 @@ var _ = KubeDescribe("Networking", func() { By("Cleaning up the webserver pods") for _, podName := range podNames { if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil { - Logf("Failed to delete pod %s: %v", podName, err) + framework.Logf("Failed to delete pod %s: %v", podName, err) } } }() @@ -148,7 +149,7 @@ var _ = KubeDescribe("Networking", func() { //once response OK, evaluate response body for pass/fail. var body []byte getDetails := func() ([]byte, error) { - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { return nil, errProxy } @@ -159,7 +160,7 @@ var _ = KubeDescribe("Networking", func() { } getStatus := func() ([]byte, error) { - proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) if errProxy != nil { return nil, errProxy } @@ -174,61 +175,61 @@ var _ = KubeDescribe("Networking", func() { timeout := time.Now().Add(3 * time.Minute) for i := 0; !passed && timeout.After(time.Now()); i++ { time.Sleep(2 * time.Second) - Logf("About to make a proxy status call") + framework.Logf("About to make a proxy status call") start := time.Now() body, err = getStatus() - Logf("Proxy status call returned in %v", time.Since(start)) + framework.Logf("Proxy status call returned in %v", time.Since(start)) if err != nil { - Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err) + framework.Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err) continue } // Finally, we pass/fail the test based on if the container's response body, as to whether or not it was able to find peers. switch { case string(body) == "pass": - Logf("Passed on attempt %v. Cleaning up.", i) + framework.Logf("Passed on attempt %v. Cleaning up.", i) passed = true case string(body) == "running": - Logf("Attempt %v: test still running", i) + framework.Logf("Attempt %v: test still running", i) case string(body) == "fail": if body, err = getDetails(); err != nil { - Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) + framework.Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) } else { - Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body)) + framework.Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body)) } case strings.Contains(string(body), "no endpoints available"): - Logf("Attempt %v: waiting on service/endpoints", i) + framework.Logf("Attempt %v: waiting on service/endpoints", i) default: - Logf("Unexpected response:\n%s", body) + framework.Logf("Unexpected response:\n%s", body) } } if !passed { if body, err = getDetails(); err != nil { - Failf("Timed out. Cleaning up. Error reading details: %v", err) + framework.Failf("Timed out. Cleaning up. Error reading details: %v", err) } else { - Failf("Timed out. Cleaning up. Details:\n%s", string(body)) + framework.Failf("Timed out. Cleaning up. Details:\n%s", string(body)) } } Expect(string(body)).To(Equal("pass")) }) // Marked with [Flaky] until the tests prove themselves stable. 
- KubeDescribe("[Flaky] Granular Checks", func() { + framework.KubeDescribe("[Flaky] Granular Checks", func() { It("should function for pod communication on a single node", func() { By("Picking a node") - nodes, err := GetReadyNodes(f) - expectNoError(err) + nodes, err := framework.GetReadyNodes(f) + framework.ExpectNoError(err) node := nodes.Items[0] By("Creating a webserver pod") podName := "same-node-webserver" defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil) - ip := LaunchWebserverPod(f, podName, node.Name) + ip := framework.LaunchWebserverPod(f, podName, node.Name) By("Checking that the webserver is accessible from a pod on the same node") - expectNoError(CheckConnectivityToHost(f, node.Name, "same-node-wget", ip)) + framework.ExpectNoError(framework.CheckConnectivityToHost(f, node.Name, "same-node-wget", ip)) }) It("should function for pod communication between nodes", func() { @@ -236,11 +237,11 @@ var _ = KubeDescribe("Networking", func() { podClient := f.Client.Pods(f.Namespace.Name) By("Picking multiple nodes") - nodes, err := GetReadyNodes(f) - expectNoError(err) + nodes, err := framework.GetReadyNodes(f) + framework.ExpectNoError(err) if len(nodes.Items) == 1 { - Skipf("The test requires two Ready nodes on %s, but found just one.", testContext.Provider) + framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider) } node1 := nodes.Items[0] @@ -249,15 +250,15 @@ var _ = KubeDescribe("Networking", func() { By("Creating a webserver pod") podName := "different-node-webserver" defer podClient.Delete(podName, nil) - ip := LaunchWebserverPod(f, podName, node1.Name) + ip := framework.LaunchWebserverPod(f, podName, node1.Name) By("Checking that the webserver is accessible from a pod on a different node") - expectNoError(CheckConnectivityToHost(f, node2.Name, "different-node-wget", ip)) + framework.ExpectNoError(framework.CheckConnectivityToHost(f, node2.Name, "different-node-wget", ip)) }) }) }) -func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name, version string) []string { +func LaunchNetTestPodPerNode(f *framework.Framework, nodes *api.NodeList, name, version string) []string { podNames := []string{} totalPods := len(nodes.Items) @@ -291,7 +292,7 @@ func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name, version st }, }) Expect(err).NotTo(HaveOccurred()) - Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name) + framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name) podNames = append(podNames, pod.ObjectMeta.Name) } return podNames diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go index 22bbfc90f0f..acbc480c583 100644 --- a/test/e2e/nodeoutofdisk.go +++ b/test/e2e/nodeoutofdisk.go @@ -27,6 +27,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -64,19 +65,19 @@ const ( // 7. Observe that the pod in pending status schedules on that node. // // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way. 
-var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { +var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { var c *client.Client var unfilledNodeName, recoveredNodeName string - framework := NewDefaultFramework("node-outofdisk") + f := framework.NewDefaultFramework("node-outofdisk") BeforeEach(func() { - c = framework.Client + c = f.Client - nodelist := ListSchedulableNodesOrDie(c) + nodelist := framework.ListSchedulableNodesOrDie(c) // Skip this test on small clusters. No need to fail since it is not a use // case that any cluster of small size needs to support. - SkipUnlessNodeCountIsAtLeast(2) + framework.SkipUnlessNodeCountIsAtLeast(2) unfilledNodeName = nodelist.Items[0].Name for _, node := range nodelist.Items[1:] { @@ -86,7 +87,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { AfterEach(func() { - nodelist := ListSchedulableNodesOrDie(c) + nodelist := framework.ListSchedulableNodesOrDie(c) Expect(len(nodelist.Items)).ToNot(BeZero()) for _, node := range nodelist.Items { if unfilledNodeName == node.Name || recoveredNodeName == node.Name { @@ -98,11 +99,11 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { It("runs out of disk space", func() { unfilledNode, err := c.Nodes().Get(unfilledNodeName) - expectNoError(err) + framework.ExpectNoError(err) By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name)) milliCpu, err := availCpu(c, unfilledNode) - expectNoError(err) + framework.ExpectNoError(err) // Per pod CPU should be just enough to fit only (numNodeOODPods - 1) pods on the given // node. We compute this value by dividing the available CPU capacity on the node by @@ -111,7 +112,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { // subtracting 1% from the value, we directly use 0.99 as the multiplier. 
podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99) - ns := framework.Namespace.Name + ns := f.Namespace.Name podClient := c.Pods(ns) By("Creating pods and waiting for all but one pods to be scheduled") @@ -120,9 +121,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { name := fmt.Sprintf("pod-node-outofdisk-%d", i) createOutOfDiskPod(c, ns, name, podCPU) - expectNoError(framework.WaitForPodRunning(name)) + framework.ExpectNoError(f.WaitForPodRunning(name)) pod, err := podClient.Get(name) - expectNoError(err) + framework.ExpectNoError(err) Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName)) } @@ -140,7 +141,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { }.AsSelector() options := api.ListOptions{FieldSelector: selector} schedEvents, err := c.Events(ns).List(options) - expectNoError(err) + framework.ExpectNoError(err) if len(schedEvents.Items) > 0 { return true, nil @@ -149,7 +150,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { } }) - nodelist := ListSchedulableNodesOrDie(c) + nodelist := framework.ListSchedulableNodesOrDie(c) Expect(len(nodelist.Items)).To(BeNumerically(">", 1)) nodeToRecover := nodelist.Items[1] @@ -159,9 +160,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { recoveredNodeName = nodeToRecover.Name By(fmt.Sprintf("Verifying that pod %s schedules on node %s", pendingPodName, recoveredNodeName)) - expectNoError(framework.WaitForPodRunning(pendingPodName)) + framework.ExpectNoError(f.WaitForPodRunning(pendingPodName)) pendingPod, err := podClient.Get(pendingPodName) - expectNoError(err) + framework.ExpectNoError(err) Expect(pendingPod.Spec.NodeName).To(Equal(recoveredNodeName)) }) }) @@ -191,7 +192,7 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) { } _, err := podClient.Create(pod) - expectNoError(err) + framework.ExpectNoError(err) } // availCpu calculates the available CPU on a given node by subtracting the CPU requested by @@ -218,7 +219,7 @@ func availCpu(c *client.Client, node *api.Node) (int64, error) { // is in turn obtained internally from cadvisor. func availSize(c *client.Client, node *api.Node) (uint64, error) { statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) - Logf("Querying stats for node %s using url %s", node.Name, statsResource) + framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource) res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() if err != nil { return 0, fmt.Errorf("error querying cAdvisor API: %v", err) @@ -236,21 +237,21 @@ func availSize(c *client.Client, node *api.Node) (uint64, error) { // below the lowDiskSpaceThreshold mark. 
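fillDiskSpace sizes the filler file so that free space ends up roughly 100MB below lowDiskSpaceThreshold, which is what flips the node's OutOfDisk condition. A standalone sketch with placeholder values, not part of the patch; the real threshold constant lives in nodeoutofdisk.go:

    package main

    import "fmt"

    const mb = 1024 * 1024

    // fillSize reproduces the calculation in fillDiskSpace: after writing a file
    // of this size, free space equals lowDiskSpaceThreshold - 100MB.
    func fillSize(avail, lowDiskSpaceThreshold uint64) uint64 {
    	return avail - lowDiskSpaceThreshold + (100 * mb)
    }

    func main() {
    	avail := uint64(20 * 1024 * mb) // 20 GiB free (example)
    	threshold := uint64(256 * mb)   // placeholder threshold
    	fill := fillSize(avail, threshold)
    	fmt.Printf("fill %d bytes, leaving %d bytes free\n", fill, avail-fill)
    }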
func fillDiskSpace(c *client.Client, node *api.Node) { avail, err := availSize(c, node) - expectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) + framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) fillSize := (avail - lowDiskSpaceThreshold + (100 * mb)) - Logf("Node %s: disk space available %d bytes", node.Name, avail) + framework.Logf("Node %s: disk space available %d bytes", node.Name, avail) By(fmt.Sprintf("Node %s: creating a file of size %d bytes to fill the available disk space", node.Name, fillSize)) cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize) - expectNoError(issueSSHCommand(cmd, testContext.Provider, node)) + framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) - ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut) + ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut) avail, err = availSize(c, node) - Logf("Node %s: disk space available %d bytes", node.Name, avail) + framework.Logf("Node %s: disk space available %d bytes", node.Name, avail) Expect(avail < lowDiskSpaceThreshold).To(BeTrue()) } @@ -258,8 +259,8 @@ func fillDiskSpace(c *client.Client, node *api.Node) { func recoverDiskSpace(c *client.Client, node *api.Node) { By(fmt.Sprintf("Recovering disk space on node %s", node.Name)) cmd := "rm -f test.img" - expectNoError(issueSSHCommand(cmd, testContext.Provider, node)) + framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) - ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut) + ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut) } diff --git a/test/e2e/pd.go b/test/e2e/pd.go index 998cc6e958b..6e8e40bf05c 100644 --- a/test/e2e/pd.go +++ b/test/e2e/pd.go @@ -38,6 +38,7 @@ import ( awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -45,19 +46,19 @@ const ( gcePDDetachPollTime = 10 * time.Second ) -var _ = KubeDescribe("Pod Disks", func() { +var _ = framework.KubeDescribe("Pod Disks", func() { var ( podClient client.PodInterface host0Name string host1Name string ) - framework := NewDefaultFramework("pod-disks") + f := framework.NewDefaultFramework("pod-disks") BeforeEach(func() { - SkipUnlessNodeCountIsAtLeast(2) + framework.SkipUnlessNodeCountIsAtLeast(2) - podClient = framework.Client.Pods(framework.Namespace.Name) - nodes := ListSchedulableNodesOrDie(framework.Client) + podClient = f.Client.Pods(f.Namespace.Name) + nodes := framework.ListSchedulableNodesOrDie(f.Client) Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") @@ -68,11 +69,11 @@ var _ = KubeDescribe("Pod Disks", func() { }) It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host [Slow]", func() { - SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessProviderIs("gce", "gke", "aws") By("creating PD") diskName, err := createPDWithRetry() - expectNoError(err, "Error creating PD") + framework.ExpectNoError(err, "Error creating PD") host0Pod := testPDPod([]string{diskName}, host0Name, false /* 
readOnly */, 1 /* numContainers */) host1Pod := testPDPod([]string{diskName}, host1Name, false /* readOnly */, 1 /* numContainers */) @@ -89,43 +90,43 @@ var _ = KubeDescribe("Pod Disks", func() { By("submitting host0Pod to kubernetes") _, err = podClient.Create(host0Pod) - expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) - expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) testFile := "/testpd1/tracker" testFileContents := fmt.Sprintf("%v", mathrand.Int()) - expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) - Logf("Wrote value: %v", testFileContents) + framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) + framework.Logf("Wrote value: %v", testFileContents) By("deleting host0Pod") - expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") By("submitting host1Pod to kubernetes") _, err = podClient.Create(host1Pod) - expectNoError(err, "Failed to create host1Pod") + framework.ExpectNoError(err, "Failed to create host1Pod") - expectNoError(framework.WaitForPodRunningSlow(host1Pod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name)) - v, err := framework.ReadFileViaContainer(host1Pod.Name, containerName, testFile) - expectNoError(err) - Logf("Read value: %v", v) + v, err := f.ReadFileViaContainer(host1Pod.Name, containerName, testFile) + framework.ExpectNoError(err) + framework.Logf("Read value: %v", v) Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents))) By("deleting host1Pod") - expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod") + framework.ExpectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod") return }) It("should schedule a pod w/ a readonly PD on two hosts, then remove both. 
[Slow]", func() { - SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessProviderIs("gce", "gke") By("creating PD") diskName, err := createPDWithRetry() - expectNoError(err, "Error creating PD") + framework.ExpectNoError(err, "Error creating PD") rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */) host0ROPod := testPDPod([]string{diskName}, host0Name, true /* readOnly */, 1 /* numContainers */) @@ -143,36 +144,36 @@ var _ = KubeDescribe("Pod Disks", func() { By("submitting rwPod to ensure PD is formatted") _, err = podClient.Create(rwPod) - expectNoError(err, "Failed to create rwPod") - expectNoError(framework.WaitForPodRunningSlow(rwPod.Name)) - expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") - expectNoError(waitForPDDetach(diskName, host0Name)) + framework.ExpectNoError(err, "Failed to create rwPod") + framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name)) + framework.ExpectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") + framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) By("submitting host0ROPod to kubernetes") _, err = podClient.Create(host0ROPod) - expectNoError(err, "Failed to create host0ROPod") + framework.ExpectNoError(err, "Failed to create host0ROPod") By("submitting host1ROPod to kubernetes") _, err = podClient.Create(host1ROPod) - expectNoError(err, "Failed to create host1ROPod") + framework.ExpectNoError(err, "Failed to create host1ROPod") - expectNoError(framework.WaitForPodRunningSlow(host0ROPod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host0ROPod.Name)) - expectNoError(framework.WaitForPodRunningSlow(host1ROPod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name)) By("deleting host0ROPod") - expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod") + framework.ExpectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod") By("deleting host1ROPod") - expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod") + framework.ExpectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod") }) It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession [Slow]", func() { - SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessProviderIs("gce", "gke", "aws") By("creating PD") diskName, err := createPDWithRetry() - expectNoError(err, "Error creating PD") + framework.ExpectNoError(err, "Error creating PD") numContainers := 4 host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, numContainers) @@ -187,43 +188,43 @@ var _ = KubeDescribe("Pod Disks", func() { fileAndContentToVerify := make(map[string]string) for i := 0; i < 3; i++ { - Logf("PD Read/Writer Iteration #%v", i) + framework.Logf("PD Read/Writer Iteration #%v", i) By("submitting host0Pod to kubernetes") _, err = podClient.Create(host0Pod) - expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) - expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) // randomly select a container and read/verify pd contents from it containerName := fmt.Sprintf("mycontainer%v", 
mathrand.Intn(numContainers)+1) - verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) + verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) // Randomly select a container to write a file to PD from containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) testFile := fmt.Sprintf("/testpd1/tracker%v", i) testFileContents := fmt.Sprintf("%v", mathrand.Int()) fileAndContentToVerify[testFile] = testFileContents - expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) - Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName) + framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) + framework.Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName) // Randomly select a container and read/verify pd contents from it containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) - verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) + verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) By("deleting host0Pod") - expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") } }) It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession [Slow]", func() { - SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessProviderIs("gce", "gke", "aws") By("creating PD1") disk1Name, err := createPDWithRetry() - expectNoError(err, "Error creating PD1") + framework.ExpectNoError(err, "Error creating PD1") By("creating PD2") disk2Name, err := createPDWithRetry() - expectNoError(err, "Error creating PD2") + framework.ExpectNoError(err, "Error creating PD2") host0Pod := testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */) @@ -239,15 +240,15 @@ var _ = KubeDescribe("Pod Disks", func() { containerName := "mycontainer" fileAndContentToVerify := make(map[string]string) for i := 0; i < 3; i++ { - Logf("PD Read/Writer Iteration #%v", i) + framework.Logf("PD Read/Writer Iteration #%v", i) By("submitting host0Pod to kubernetes") _, err = podClient.Create(host0Pod) - expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) + framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) - expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) + framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) // Read/verify pd contents for both disks from container - verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) + verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) // Write a file to both PDs from container testFilePD1 := fmt.Sprintf("/testpd1/tracker%v", i) @@ -256,16 +257,16 @@ var _ = KubeDescribe("Pod Disks", func() { testFilePD2Contents := fmt.Sprintf("%v", mathrand.Int()) fileAndContentToVerify[testFilePD1] = testFilePD1Contents fileAndContentToVerify[testFilePD2] = testFilePD2Contents - expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, 
testFilePD1Contents)) - Logf("Wrote value: \"%v\" to PD1 (%q) from pod %q container %q", testFilePD1Contents, disk1Name, host0Pod.Name, containerName) - expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD2, testFilePD2Contents)) - Logf("Wrote value: \"%v\" to PD2 (%q) from pod %q container %q", testFilePD2Contents, disk2Name, host0Pod.Name, containerName) + framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, testFilePD1Contents)) + framework.Logf("Wrote value: \"%v\" to PD1 (%q) from pod %q container %q", testFilePD1Contents, disk1Name, host0Pod.Name, containerName) + framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD2, testFilePD2Contents)) + framework.Logf("Wrote value: \"%v\" to PD2 (%q) from pod %q container %q", testFilePD2Contents, disk2Name, host0Pod.Name, containerName) // Read/verify pd contents for both disks from container - verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) + verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) By("deleting host0Pod") - expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") } }) }) @@ -275,10 +276,10 @@ func createPDWithRetry() (string, error) { var err error for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) { if newDiskName, err = createPD(); err != nil { - Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err) + framework.Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err) continue } - Logf("Successfully created a new PD: %q.", newDiskName) + framework.Logf("Successfully created a new PD: %q.", newDiskName) break } return newDiskName, err @@ -288,30 +289,30 @@ func deletePDWithRetry(diskName string) { var err error for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) { if err = deletePD(diskName); err != nil { - Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err) + framework.Logf("Couldn't delete PD %q. 
Sleeping 5 seconds (%v)", diskName, err) continue } - Logf("Successfully deleted PD %q.", diskName) + framework.Logf("Successfully deleted PD %q.", diskName) break } - expectNoError(err, "Error deleting PD") + framework.ExpectNoError(err, "Error deleting PD") } -func verifyPDContentsViaContainer(f *Framework, podName, containerName string, fileAndContentToVerify map[string]string) { +func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) { for filePath, expectedContents := range fileAndContentToVerify { v, err := f.ReadFileViaContainer(podName, containerName, filePath) if err != nil { - Logf("Error reading file: %v", err) + framework.Logf("Error reading file: %v", err) } - expectNoError(err) - Logf("Read file %q with content: %v", filePath, v) + framework.ExpectNoError(err) + framework.Logf("Read file %q with content: %v", filePath, v) Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(expectedContents))) } } func createPD() (string, error) { - if testContext.Provider == "gce" || testContext.Provider == "gke" { - pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID())) + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { + pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(util.NewUUID())) gceCloud, err := getGCECloud() if err != nil { @@ -319,12 +320,12 @@ func createPD() (string, error) { } tags := map[string]string{} - err = gceCloud.CreateDisk(pdName, testContext.CloudConfig.Zone, 10 /* sizeGb */, tags) + err = gceCloud.CreateDisk(pdName, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags) if err != nil { return "", err } return pdName, nil - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { client := ec2.New(session.New()) request := &ec2.CreateVolumeInput{} @@ -347,7 +348,7 @@ func createPD() (string, error) { } func deletePD(pdName string) error { - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { gceCloud, err := getGCECloud() if err != nil { return err @@ -361,10 +362,10 @@ func deletePD(pdName string) error { return nil } - Logf("Error deleting PD %q: %v", pdName, err) + framework.Logf("Error deleting PD %q: %v", pdName, err) } return err - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { client := ec2.New(session.New()) tokens := strings.Split(pdName, "/") @@ -374,7 +375,7 @@ func deletePD(pdName string) error { _, err := client.DeleteVolume(request) if err != nil { if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { - Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName) + framework.Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName) } else { return fmt.Errorf("error deleting EBS volumes: %v", err) } @@ -386,7 +387,7 @@ func deletePD(pdName string) error { } func detachPD(hostName, pdName string) error { - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { instanceName := strings.Split(hostName, ".")[0] gceCloud, err := getGCECloud() @@ -401,11 +402,11 @@ func detachPD(hostName, pdName string) error { return nil } - Logf("Error detaching PD %q: %v", pdName, err) + framework.Logf("Error detaching PD %q: %v", 
pdName, err) } return err - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { client := ec2.New(session.New()) tokens := strings.Split(pdName, "/") @@ -462,7 +463,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine }, } - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { pod.Spec.Volumes = make([]api.Volume, len(diskNames)) for k, diskName := range diskNames { pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) @@ -474,7 +475,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine }, } } - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { pod.Spec.Volumes = make([]api.Volume, len(diskNames)) for k, diskName := range diskNames { pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) @@ -487,7 +488,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine } } } else { - panic("Unknown provider: " + testContext.Provider) + panic("Unknown provider: " + framework.TestContext.Provider) } return pod @@ -495,7 +496,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine // Waits for specified PD to to detach from specified hostName func waitForPDDetach(diskName, hostName string) error { - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { gceCloud, err := getGCECloud() if err != nil { return err @@ -504,17 +505,17 @@ func waitForPDDetach(diskName, hostName string) error { for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) { diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName) if err != nil { - Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err) + framework.Logf("Error waiting for PD %q to detach from node %q. 
'DiskIsAttached(...)' failed with %v", diskName, hostName, err) return err } if !diskAttached { // Specified disk does not appear to be attached to specified node - Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName) + framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName) return nil } - Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName) + framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName) } return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, hostName, gcePDDetachTimeout) @@ -524,10 +525,10 @@ func waitForPDDetach(diskName, hostName string) error { } func getGCECloud() (*gcecloud.GCECloud, error) { - gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud) + gceCloud, ok := framework.TestContext.CloudConfig.Provider.(*gcecloud.GCECloud) if !ok { - return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider) + return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", framework.TestContext.CloudConfig.Provider) } return gceCloud, nil diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go index 1f34630cf0d..75787d42eaf 100644 --- a/test/e2e/persistent_volumes.go +++ b/test/e2e/persistent_volumes.go @@ -26,18 +26,19 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" ) // This test needs privileged containers, which are disabled by default. Run // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]" -var _ = KubeDescribe("PersistentVolumes [Feature:Volumes]", func() { - framework := NewDefaultFramework("pv") +var _ = framework.KubeDescribe("PersistentVolumes [Feature:Volumes]", func() { + f := framework.NewDefaultFramework("pv") var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name }) It("NFS volume can be created, bound, retrieved, unbound, and used by a pod", func() { @@ -54,47 +55,47 @@ var _ = KubeDescribe("PersistentVolumes [Feature:Volumes]", func() { pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("NFS server IP address: %v", serverIP) + framework.Logf("NFS server IP address: %v", serverIP) pv := makePersistentVolume(serverIP) pvc := makePersistentVolumeClaim(ns) - Logf("Creating PersistentVolume using NFS") + framework.Logf("Creating PersistentVolume using NFS") pv, err := c.PersistentVolumes().Create(pv) Expect(err).NotTo(HaveOccurred()) - Logf("Creating PersistentVolumeClaim") + framework.Logf("Creating PersistentVolumeClaim") pvc, err = c.PersistentVolumeClaims(ns).Create(pvc) Expect(err).NotTo(HaveOccurred()) // allow the binder a chance to catch up. should not be more than 20s. 
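
That binder wait is not a fixed sleep: the next lines poll the PersistentVolume's phase until it reaches the desired state or a timeout expires. A minimal sketch of that pattern, with the lookup abstracted behind a getter so the snippet stays self-contained (the helper and parameter names here are illustrative, not the framework's actual WaitForPersistentVolumePhase signature):

package e2esketch

import (
    "fmt"
    "time"
)

// pvPhaseGetter reports the current phase of a named PersistentVolume, e.g.
// "Bound" or "Available". In the real test this would wrap
// c.PersistentVolumes().Get and read pv.Status.Phase.
type pvPhaseGetter func(name string) (string, error)

// waitForPVPhase polls until the PV reports the wanted phase or the timeout
// expires. Transient lookup errors are retried rather than failing the wait.
func waitForPVPhase(want, name string, get pvPhaseGetter, poll, timeout time.Duration) error {
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
        phase, err := get(name)
        if err != nil {
            continue // retry; the API server may be briefly unreachable
        }
        if phase == want {
            return nil
        }
    }
    return fmt.Errorf("gave up waiting for PV %q to reach phase %q after %v", name, want, timeout)
}

The framework helper used on the next lines follows the same shape, taking the unversioned client and an api.PersistentVolumePhase in place of the string getter.
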
- waitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second) + framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second) pv, err = c.PersistentVolumes().Get(pv.Name) Expect(err).NotTo(HaveOccurred()) if pv.Spec.ClaimRef == nil { - Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv) + framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv) } - Logf("Deleting PersistentVolumeClaim to trigger PV Recycling") + framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling") err = c.PersistentVolumeClaims(ns).Delete(pvc.Name) Expect(err).NotTo(HaveOccurred()) // allow the recycler a chance to catch up. it has to perform NFS scrub, which can be slow in e2e. - waitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second) + framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second) pv, err = c.PersistentVolumes().Get(pv.Name) Expect(err).NotTo(HaveOccurred()) if pv.Spec.ClaimRef != nil { - Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv) + framework.Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv) } // The NFS Server pod we're using contains an index.html file // Verify the file was really scrubbed from the volume podTemplate := makeCheckPod(ns, serverIP) checkpod, err := c.Pods(ns).Create(podTemplate) - expectNoError(err, "Failed to create checker pod: %v", err) - err = waitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace) + framework.ExpectNoError(err, "Failed to create checker pod: %v", err) + err = framework.WaitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/test/e2e/pods.go b/test/e2e/pods.go index a2f9a0b7d9f..66b67cf1071 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -54,7 +55,7 @@ var ( func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int, timeout time.Duration) { By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns)) _, err := c.Pods(ns).Create(podDescr) - expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) + framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. defer func() { @@ -65,16 +66,16 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe // Wait until the pod is not pending. (Here we need to check for something other than // 'Pending' other than checking for 'Running', since when failures occur, we go to // 'Terminated' which can cause indefinite blocking.) - expectNoError(waitForPodNotPending(c, ns, podDescr.Name), + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podDescr.Name), fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns)) - Logf("Started pod %s in namespace %s", podDescr.Name, ns) + framework.Logf("Started pod %s in namespace %s", podDescr.Name, ns) // Check the pod's current state and verify that restartCount is present. 
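
The rest of runLivenessTest boils down to sampling the container's restart count on a fixed interval, insisting that it never decreases, and stopping once enough restarts have been observed or the deadline passes. A compact sketch of that loop, with the pod lookup abstracted behind a function (the helper names are illustrative, not part of the framework):

package e2esketch

import (
    "fmt"
    "time"
)

// restartCounter returns the current restart count of the container under
// test; in the real test this wraps c.Pods(ns).Get plus
// api.GetExistingContainerStatus.
type restartCounter func() (int, error)

// observeRestarts samples the restart count every interval until the deadline
// passes or target restarts have been seen, and returns how many restarts it
// observed relative to the initial count. It errors out if the count ever
// decreases, since kubelet restart counts must increase monotonically.
func observeRestarts(count restartCounter, target int, interval, timeout time.Duration) (int, error) {
    initial, err := count()
    if err != nil {
        return 0, fmt.Errorf("reading initial restart count: %v", err)
    }
    last := initial
    for deadline := time.Now().Add(timeout); time.Now().Before(deadline); time.Sleep(interval) {
        current, err := count()
        if err != nil {
            return last - initial, fmt.Errorf("reading restart count: %v", err)
        }
        if current < last {
            return current - initial, fmt.Errorf("restart count decreased from %d to %d", last, current)
        }
        last = current
        if target > 0 && last-initial >= target {
            break
        }
    }
    return last - initial, nil
}

The caller then applies the same assertions the diff shows: fail if restarts were expected but too few were observed, or if none were expected and any occurred.
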
By("checking the pod's current state and verifying that restartCount is present") pod, err := c.Pods(ns).Get(podDescr.Name) - expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns)) + framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns)) initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount - Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount) + framework.Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount) // Wait for the restart state to be as desired. deadline := time.Now().Add(timeout) @@ -82,13 +83,13 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe observedRestarts := 0 for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { pod, err = c.Pods(ns).Get(podDescr.Name) - expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name)) + framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name)) restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount if restartCount != lastRestartCount { - Logf("Restart count of pod %s/%s is now %d (%v elapsed)", + framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)", ns, podDescr.Name, restartCount, time.Since(start)) if restartCount < lastRestartCount { - Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d", + framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d", ns, podDescr.Name, lastRestartCount, restartCount) } } @@ -104,7 +105,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe // If we expected n restarts (n > 0), fail if we observed < n restarts. if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 && observedRestarts < expectNumRestarts) { - Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t", + framework.Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t", ns, podDescr.Name, expectNumRestarts, observedRestarts) } } @@ -115,12 +116,12 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) { By("creating pod") defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) if _, err := podClient.Create(pod); err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } By("ensuring that pod is running and has a hostIP") // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. - err := waitForPodRunningInNamespace(c, pod.Name, ns) + err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns) Expect(err).NotTo(HaveOccurred()) // Try to make sure we get a hostIP for each pod. 
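
The hostIP wait below, like createPDWithRetry and waitForPDDetach earlier in this diff, is the same time-bounded polling idiom: a for loop keyed on time.Since(start) with a fixed sleep between attempts. As a reusable sketch (the helper name is mine; the framework's wait.Poll used elsewhere in this file provides the same behaviour):

package e2esketch

import (
    "fmt"
    "time"
)

// pollUntil repeatedly calls condition until it reports done, returns a
// non-nil error, or the timeout elapses. It mirrors the
// "for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval)"
// loops used throughout these tests.
func pollUntil(interval, timeout time.Duration, condition func() (done bool, err error)) error {
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
        done, err := condition()
        if err != nil {
            return err
        }
        if done {
            return nil
        }
    }
    return fmt.Errorf("condition not met within %v", timeout)
}

For the host IP check, the condition would fetch the pod and report done once Status.HostIP is non-empty, which is exactly what the loop below does inline.
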
hostIPTimeout := 2 * time.Minute @@ -129,56 +130,56 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) { p, err := podClient.Get(pod.Name) Expect(err).NotTo(HaveOccurred()) if p.Status.HostIP != "" { - Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) + framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) break } if time.Since(t) >= hostIPTimeout { - Failf("Gave up waiting for hostIP of pod %s after %v seconds", + framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds", p.Name, time.Since(t).Seconds()) } - Logf("Retrying to get the hostIP of pod %s", p.Name) + framework.Logf("Retrying to get the hostIP of pod %s", p.Name) time.Sleep(5 * time.Second) } } -func runPodFromStruct(framework *Framework, pod *api.Pod) { +func runPodFromStruct(f *framework.Framework, pod *api.Pod) { By("submitting the pod to kubernetes") - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) pod, err := podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") pod, err = podClient.Get(pod.Name) if err != nil { - Failf("failed to get pod: %v", err) + framework.Failf("failed to get pod: %v", err) } } -func startPodAndGetBackOffs(framework *Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) { - runPodFromStruct(framework, pod) +func startPodAndGetBackOffs(f *framework.Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) { + runPodFromStruct(f, pod) time.Sleep(sleepAmount) By("getting restart delay-0") - _, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + _, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } By("getting restart delay-1") - delay1, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + delay1, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } By("getting restart delay-2") - delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } return delay1, delay2 } @@ -188,29 +189,29 @@ func getRestartDelay(c *client.Client, pod *api.Pod, ns string, name string, con for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay time.Sleep(time.Second) pod, err := c.Pods(ns).Get(name) - expectNoError(err, fmt.Sprintf("getting pod %s", name)) + framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", name)) status, ok := 
api.GetContainerStatus(pod.Status.ContainerStatuses, containerName) if !ok { - Logf("getRestartDelay: status missing") + framework.Logf("getRestartDelay: status missing") continue } if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) { startedAt := status.State.Running.StartedAt.Time finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time - Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt)) + framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt)) return startedAt.Sub(finishedAt), nil } } return 0, fmt.Errorf("timeout getting pod restart delay") } -var _ = KubeDescribe("Pods", func() { - framework := NewDefaultFramework("pods") +var _ = framework.KubeDescribe("Pods", func() { + f := framework.NewDefaultFramework("pods") It("should get a host IP [Conformance]", func() { name := "pod-hostip-" + string(util.NewUUID()) - testHostIP(framework.Client, framework.Namespace.Name, &api.Pod{ + testHostIP(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -226,7 +227,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should be schedule with cpu and memory limits [Conformance]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-update-" + string(util.NewUUID()) @@ -257,13 +258,13 @@ var _ = KubeDescribe("Pods", func() { defer podClient.Delete(pod.Name, nil) _, err := podClient.Create(pod) if err != nil { - Failf("Error creating a pod: %v", err) + framework.Failf("Error creating a pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) }) It("should be submitted and removed [Conformance]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-update-" + string(util.NewUUID()) @@ -301,7 +302,7 @@ var _ = KubeDescribe("Pods", func() { options := api.ListOptions{LabelSelector: selector} pods, err := podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(0)) options = api.ListOptions{ @@ -310,7 +311,7 @@ var _ = KubeDescribe("Pods", func() { } w, err := podClient.Watch(options) if err != nil { - Failf("Failed to set up watch: %v", err) + framework.Failf("Failed to set up watch: %v", err) } By("submitting the pod to kubernetes") @@ -320,7 +321,7 @@ var _ = KubeDescribe("Pods", func() { defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) _, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } By("verifying the pod is in kubernetes") @@ -328,7 +329,7 @@ var _ = KubeDescribe("Pods", func() { options = api.ListOptions{LabelSelector: selector} pods, err = podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(1)) @@ -336,27 +337,27 @@ var _ = KubeDescribe("Pods", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - Failf("Failed 
to observe pod creation: %v", event) + framework.Failf("Failed to observe pod creation: %v", event) } - case <-time.After(podStartTimeout): + case <-time.After(framework.PodStartTimeout): Fail("Timeout while waiting for pod creation") } // We need to wait for the pod to be scheduled, otherwise the deletion // will be carried out immediately rather than gracefully. - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("deleting the pod gracefully") if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil { - Failf("Failed to delete pod: %v", err) + framework.Failf("Failed to delete pod: %v", err) } By("verifying the kubelet observed the termination notice") pod, err = podClient.Get(pod.Name) Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { - podList, err := GetKubeletPods(framework.Client, pod.Spec.NodeName) + podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName) if err != nil { - Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) + framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) return false, nil } for _, kubeletPod := range podList.Items { @@ -364,12 +365,12 @@ var _ = KubeDescribe("Pods", func() { continue } if kubeletPod.ObjectMeta.DeletionTimestamp == nil { - Logf("deletion has not yet been observed") + framework.Logf("deletion has not yet been observed") return false, nil } return true, nil } - Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed") + framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed") return true, nil })).NotTo(HaveOccurred(), "kubelet never observed the termination notice") @@ -406,7 +407,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should be updated [Conformance]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-update-" + string(util.NewUUID()) @@ -446,10 +447,10 @@ var _ = KubeDescribe("Pods", func() { }() pod, err := podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) @@ -458,7 +459,7 @@ var _ = KubeDescribe("Pods", func() { Expect(len(pods.Items)).To(Equal(1)) // Standard get, update retry loop - expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { By("updating the pod") value = strconv.Itoa(time.Now().Nanosecond()) if pod == nil { // on retries we need to re-get @@ -470,29 +471,29 @@ var _ = KubeDescribe("Pods", func() { pod.Labels["time"] = value pod, err = podClient.Update(pod) if err == nil { - Logf("Successfully updated pod") + framework.Logf("Successfully updated pod") return true, nil } if errors.IsConflict(err) { - Logf("Conflicting update to pod, re-get and re-update: %v", err) + framework.Logf("Conflicting update to pod, re-get and re-update: %v", err) pod = nil // re-get it when we retry return false, nil } return false, fmt.Errorf("failed to update pod: %v", err) })) - 
expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the updated pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = api.ListOptions{LabelSelector: selector} pods, err = podClient.List(options) Expect(len(pods.Items)).To(Equal(1)) - Logf("Pod update OK") + framework.Logf("Pod update OK") }) It("should allow activeDeadlineSeconds to be updated [Conformance]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-update-activedeadlineseconds-" + string(util.NewUUID()) @@ -532,10 +533,10 @@ var _ = KubeDescribe("Pods", func() { }() pod, err := podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) @@ -544,7 +545,7 @@ var _ = KubeDescribe("Pods", func() { Expect(len(pods.Items)).To(Equal(1)) // Standard get, update retry loop - expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { By("updating the pod") value = strconv.Itoa(time.Now().Nanosecond()) if pod == nil { // on retries we need to re-get @@ -557,18 +558,18 @@ var _ = KubeDescribe("Pods", func() { pod.Spec.ActiveDeadlineSeconds = &newDeadline pod, err = podClient.Update(pod) if err == nil { - Logf("Successfully updated pod") + framework.Logf("Successfully updated pod") return true, nil } if errors.IsConflict(err) { - Logf("Conflicting update to pod, re-get and re-update: %v", err) + framework.Logf("Conflicting update to pod, re-get and re-update: %v", err) pod = nil // re-get it when we retry return false, nil } return false, fmt.Errorf("failed to update pod: %v", err) })) - expectNoError(framework.WaitForPodTerminated(pod.Name, "DeadlineExceeded")) + framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded")) }) It("should contain environment variables for services [Conformance]", func() { @@ -590,12 +591,12 @@ var _ = KubeDescribe("Pods", func() { }, }, } - defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0)) - _, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod) + defer f.Client.Pods(f.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0)) + _, err := f.Client.Pods(f.Namespace.Name).Create(serverPod) if err != nil { - Failf("Failed to create serverPod: %v", err) + framework.Failf("Failed to create serverPod: %v", err) } - expectNoError(framework.WaitForPodRunning(serverPod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(serverPod.Name)) // This service exposes port 8080 of the test pod as a service on port 8765 // TODO(filbranden): We would like to use a unique service name such as: @@ -622,10 +623,10 @@ var _ = KubeDescribe("Pods", func() { }, }, } - defer framework.Client.Services(framework.Namespace.Name).Delete(svc.Name) - _, err = framework.Client.Services(framework.Namespace.Name).Create(svc) + defer f.Client.Services(f.Namespace.Name).Delete(svc.Name) + _, err = f.Client.Services(f.Namespace.Name).Create(svc) if err != nil { - Failf("Failed to create service: %v", 
err) + framework.Failf("Failed to create service: %v", err) } // Make a client pod that verifies that it has the service environment variables. @@ -647,7 +648,7 @@ var _ = KubeDescribe("Pods", func() { }, } - framework.TestContainerOutput("service env", pod, 0, []string{ + f.TestContainerOutput("service env", pod, 0, []string{ "FOOSERVICE_SERVICE_HOST=", "FOOSERVICE_SERVICE_PORT=", "FOOSERVICE_PORT=", @@ -659,7 +660,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() { - runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ + runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "liveness-exec", Labels: map[string]string{"test": "liveness"}, @@ -686,7 +687,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should *not* be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() { - runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ + runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "liveness-exec", Labels: map[string]string{"test": "liveness"}, @@ -713,7 +714,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should be restarted with a /healthz http liveness probe [Conformance]", func() { - runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ + runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "liveness-http", Labels: map[string]string{"test": "liveness"}, @@ -742,7 +743,7 @@ var _ = KubeDescribe("Pods", func() { // Slow by design (5 min) It("should have monotonically increasing restart count [Conformance] [Slow]", func() { - runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ + runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "liveness-http", Labels: map[string]string{"test": "liveness"}, @@ -770,7 +771,7 @@ var _ = KubeDescribe("Pods", func() { }) It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() { - runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ + runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "liveness-http", Labels: map[string]string{"test": "liveness"}, @@ -785,7 +786,7 @@ var _ = KubeDescribe("Pods", func() { Args: []string{ "-service=liveness-http", "-peers=1", - "-namespace=" + framework.Namespace.Name}, + "-namespace=" + f.Namespace.Name}, Ports: []api.ContainerPort{{ContainerPort: 8080}}, LivenessProbe: &api.Probe{ Handler: api.Handler{ @@ -805,11 +806,11 @@ var _ = KubeDescribe("Pods", func() { }) It("should support remote command execution over websockets", func() { - config, err := loadConfig() + config, err := framework.LoadConfig() if err != nil { - Failf("Unable to get base config: %v", err) + framework.Failf("Unable to get base config: %v", err) } - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-exec-websocket-" + string(util.NewUUID()) @@ -835,13 +836,13 @@ var _ = KubeDescribe("Pods", func() { }() pod, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - req := framework.Client.Get(). - Namespace(framework.Namespace.Name). 
+ req := f.Client.Get(). + Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). Suffix("exec"). @@ -852,9 +853,9 @@ var _ = KubeDescribe("Pods", func() { Param("command", "/etc/resolv.conf") url := req.URL() - ws, err := OpenWebSocketForURL(url, config, []string{"channel.k8s.io"}) + ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"}) if err != nil { - Failf("Failed to open websocket to %s: %v", url.String(), err) + framework.Failf("Failed to open websocket to %s: %v", url.String(), err) } defer ws.Close() @@ -865,30 +866,30 @@ var _ = KubeDescribe("Pods", func() { if err == io.EOF { break } - Failf("Failed to read completely from websocket %s: %v", url.String(), err) + framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err) } if len(msg) == 0 { continue } if msg[0] != 1 { - Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg) + framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg) } buf.Write(msg[1:]) } if buf.Len() == 0 { - Failf("Unexpected output from server") + framework.Failf("Unexpected output from server") } if !strings.Contains(buf.String(), "nameserver") { - Failf("Expected to find 'nameserver' in %q", buf.String()) + framework.Failf("Expected to find 'nameserver' in %q", buf.String()) } }) It("should support retrieving logs from the container over websockets", func() { - config, err := loadConfig() + config, err := framework.LoadConfig() if err != nil { - Failf("Unable to get base config: %v", err) + framework.Failf("Unable to get base config: %v", err) } - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-logs-websocket-" + string(util.NewUUID()) @@ -914,13 +915,13 @@ var _ = KubeDescribe("Pods", func() { }() pod, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - req := framework.Client.Get(). - Namespace(framework.Namespace.Name). + req := f.Client.Get(). + Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). Suffix("log"). 
@@ -928,9 +929,9 @@ var _ = KubeDescribe("Pods", func() { url := req.URL() - ws, err := OpenWebSocketForURL(url, config, []string{"binary.k8s.io"}) + ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"}) if err != nil { - Failf("Failed to open websocket to %s: %v", url.String(), err) + framework.Failf("Failed to open websocket to %s: %v", url.String(), err) } defer ws.Close() buf := &bytes.Buffer{} @@ -940,7 +941,7 @@ var _ = KubeDescribe("Pods", func() { if err == io.EOF { break } - Failf("Failed to read completely from websocket %s: %v", url.String(), err) + framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err) } if len(msg) == 0 { continue @@ -948,14 +949,14 @@ var _ = KubeDescribe("Pods", func() { buf.Write(msg) } if buf.String() != "container is alive\n" { - Failf("Unexpected websocket logs:\n%s", buf.String()) + framework.Failf("Unexpected websocket logs:\n%s", buf.String()) } }) It("should have their auto-restart back-off timer reset on image update [Slow]", func() { podName := "pod-back-off-image" containerName := "back-off" - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, @@ -977,35 +978,35 @@ var _ = KubeDescribe("Pods", func() { podClient.Delete(pod.Name, api.NewDeleteOptions(0)) }() - delay1, delay2 := startPodAndGetBackOffs(framework, pod, podName, containerName, buildBackOffDuration) + delay1, delay2 := startPodAndGetBackOffs(f, pod, podName, containerName, buildBackOffDuration) By("updating the image") pod, err := podClient.Get(pod.Name) if err != nil { - Failf("failed to get pod: %v", err) + framework.Failf("failed to get pod: %v", err) } pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx:1.7.9" pod, err = podClient.Update(pod) if err != nil { - Failf("error updating pod=%s/%s %v", podName, containerName, err) + framework.Failf("error updating pod=%s/%s %v", podName, containerName, err) } time.Sleep(syncLoopFrequency) - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("get restart delay after image update") - delayAfterUpdate, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + delayAfterUpdate, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 { - Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2) + framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2) } }) // Slow issue #19027 (20 mins) It("should cap back-off at MaxContainerBackOff [Slow]", func() { - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) podName := "back-off-cap" containerName := "back-off-cap" pod := &api.Pod{ @@ -1029,7 +1030,7 @@ var _ = KubeDescribe("Pods", func() { podClient.Delete(pod.Name, api.NewDeleteOptions(0)) }() - runPodFromStruct(framework, pod) + runPodFromStruct(f, pod) time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get 
to a back-off of x // wait for a delay == capped delay of MaxContainerBackOff @@ -1039,9 +1040,9 @@ var _ = KubeDescribe("Pods", func() { err error ) for i := 0; i < 3; i++ { - delay1, err = getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + delay1, err = getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delay1 < kubelet.MaxContainerBackOff { @@ -1050,17 +1051,17 @@ var _ = KubeDescribe("Pods", func() { } if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) { - Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1) + framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1) } By("getting restart delay after a capped delay") - delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) + delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName) if err != nil { - Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift - Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2) + framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2) } }) @@ -1071,12 +1072,12 @@ var _ = KubeDescribe("Pods", func() { // all providers), we can enable these tests. /* It("should support remote command execution", func() { - clientConfig, err := loadConfig() + clientConfig, err := framework.LoadConfig() if err != nil { - Failf("Failed to create client config: %v", err) + framework.Failf("Failed to create client config: %v", err) } - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-exec-" + string(util.NewUUID()) @@ -1102,7 +1103,7 @@ var _ = KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") _, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } defer func() { // We call defer here in case there is a problem with @@ -1112,45 +1113,45 @@ var _ = KubeDescribe("Pods", func() { }() By("waiting for the pod to start running") - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := api.ListOptions{LabelSelector: selector} pods, err := podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(1)) pod = &pods.Items[0] By(fmt.Sprintf("executing command on host %s pod %s in container %s", pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) - req := framework.Client.Get(). + req := f.Client.Get(). Prefix("proxy"). Resource("nodes"). Name(pod.Status.Host). 
- Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + Suffix("exec", f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) out := &bytes.Buffer{} e := remotecommand.New(req, clientConfig, []string{"whoami"}, nil, out, nil, false) err = e.Execute() if err != nil { - Failf("Failed to execute command on host %s pod %s in container %s: %v", + framework.Failf("Failed to execute command on host %s pod %s in container %s: %v", pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name, err) } if e, a := "root\n", out.String(); e != a { - Failf("exec: whoami: expected '%s', got '%s'", e, a) + framework.Failf("exec: whoami: expected '%s', got '%s'", e, a) } }) It("should support port forwarding", func() { - clientConfig, err := loadConfig() + clientConfig, err := framework.LoadConfig() if err != nil { - Failf("Failed to create client config: %v", err) + framework.Failf("Failed to create client config: %v", err) } - podClient := framework.Client.Pods(framework.Namespace.Name) + podClient := f.Client.Pods(f.Namespace.Name) By("creating the pod") name := "pod-portforward-" + string(util.NewUUID()) @@ -1177,7 +1178,7 @@ var _ = KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") _, err = podClient.Create(pod) if err != nil { - Failf("Failed to create pod: %v", err) + framework.Failf("Failed to create pod: %v", err) } defer func() { // We call defer here in case there is a problem with @@ -1187,14 +1188,14 @@ var _ = KubeDescribe("Pods", func() { }() By("waiting for the pod to start running") - expectNoError(framework.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := api.ListOptions{LabelSelector: selector} pods, err := podClient.List(options) if err != nil { - Failf("Failed to query for pods: %v", err) + framework.Failf("Failed to query for pods: %v", err) } Expect(len(pods.Items)).To(Equal(1)) @@ -1202,16 +1203,16 @@ var _ = KubeDescribe("Pods", func() { By(fmt.Sprintf("initiating port forwarding to host %s pod %s in container %s", pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) - req := framework.Client.Get(). + req := f.Client.Get(). Prefix("proxy"). Resource("nodes"). Name(pod.Status.Host). 
- Suffix("portForward", framework.Namespace.Name, pod.Name) + Suffix("portForward", f.Namespace.Name, pod.Name) stopChan := make(chan struct{}) pf, err := portforward.New(req, clientConfig, []string{"5678:80"}, stopChan) if err != nil { - Failf("Error creating port forwarder: %s", err) + framework.Failf("Error creating port forwarder: %s", err) } errorChan := make(chan error) @@ -1224,11 +1225,11 @@ var _ = KubeDescribe("Pods", func() { resp, err := http.Get("http://localhost:5678/") if err != nil { - Failf("Error with http get to localhost:5678: %s", err) + framework.Failf("Error with http get to localhost:5678: %s", err) } body, err := ioutil.ReadAll(resp.Body) if err != nil { - Failf("Error reading response body: %s", err) + framework.Failf("Error reading response body: %s", err) } titleRegex := regexp.MustCompile("(.+)") @@ -1237,7 +1238,7 @@ var _ = KubeDescribe("Pods", func() { Fail("Unable to locate page title in response HTML") } if e, a := "Welcome to nginx on Debian!", matches[1]; e != a { - Failf(": expected '%s', got '%s'", e, a) + framework.Failf("<title>: expected '%s', got '%s'", e, a) } }) */ diff --git a/test/e2e/portforward.go b/test/e2e/portforward.go index b1f55aacdc1..0a5d8aa8836 100644 --- a/test/e2e/portforward.go +++ b/test/e2e/portforward.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) @@ -90,7 +91,7 @@ type portForwardCommand struct { func (c *portForwardCommand) Stop() { // SIGINT signals that kubectl port-forward should gracefully terminate if err := c.cmd.Process.Signal(syscall.SIGINT); err != nil { - Logf("error sending SIGINT to kubectl port-forward: %v", err) + framework.Logf("error sending SIGINT to kubectl port-forward: %v", err) } // try to wait for a clean exit @@ -108,41 +109,41 @@ func (c *portForwardCommand) Stop() { // success return } - Logf("error waiting for kubectl port-forward to exit: %v", err) + framework.Logf("error waiting for kubectl port-forward to exit: %v", err) case <-expired.C: - Logf("timed out waiting for kubectl port-forward to exit") + framework.Logf("timed out waiting for kubectl port-forward to exit") } - Logf("trying to forcibly kill kubectl port-forward") - tryKill(c.cmd) + framework.Logf("trying to forcibly kill kubectl port-forward") + framework.TryKill(c.cmd) } func runPortForward(ns, podName string, port int) *portForwardCommand { - cmd := kubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port)) + cmd := framework.KubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port)) // This is somewhat ugly but is the only way to retrieve the port that was picked // by the port-forward command. We don't want to hard code the port as we have no // way of guaranteeing we can pick one that isn't in use, particularly on Jenkins. 
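
Because the local port is left for kubectl to choose, runPortForward has to recover the chosen port by reading the command's stderr and matching it against portForwardRegexp, as the next lines show. A standalone sketch of just that parsing step; the regexp here assumes output of the form "Forwarding from 127.0.0.1:NNNNN -> 80", which matches how kubectl typically phrases it but should be treated as an assumption:

package e2esketch

import (
    "fmt"
    "regexp"
    "strconv"
)

// forwardRegexp captures the locally bound port from kubectl port-forward's
// progress message. Treat the exact wording as an assumption.
var forwardRegexp = regexp.MustCompile(`Forwarding from 127\.0\.0\.1:(\d+) -> \d+`)

// parseForwardedPort extracts the local port number from the stderr output of
// kubectl port-forward.
func parseForwardedPort(stderrOutput string) (int, error) {
    match := forwardRegexp.FindStringSubmatch(stderrOutput)
    if len(match) != 2 {
        return 0, fmt.Errorf("could not parse port-forward output: %q", stderrOutput)
    }
    port, err := strconv.Atoi(match[1])
    if err != nil {
        return 0, fmt.Errorf("converting %q to an int: %v", match[1], err)
    }
    return port, nil
}
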
- Logf("starting port-forward command and streaming output") - _, stderr, err := startCmdAndStreamOutput(cmd) + framework.Logf("starting port-forward command and streaming output") + _, stderr, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { - Failf("Failed to start port-forward command: %v", err) + framework.Failf("Failed to start port-forward command: %v", err) } buf := make([]byte, 128) var n int - Logf("reading from `kubectl port-forward` command's stderr") + framework.Logf("reading from `kubectl port-forward` command's stderr") if n, err = stderr.Read(buf); err != nil { - Failf("Failed to read from kubectl port-forward stderr: %v", err) + framework.Failf("Failed to read from kubectl port-forward stderr: %v", err) } portForwardOutput := string(buf[:n]) match := portForwardRegexp.FindStringSubmatch(portForwardOutput) if len(match) != 2 { - Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) + framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) } listenPort, err := strconv.Atoi(match[1]) if err != nil { - Failf("Error converting %s to an int: %v", match[1], err) + framework.Failf("Error converting %s to an int: %v", match[1], err) } return &portForwardCommand{ @@ -151,42 +152,42 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { } } -var _ = KubeDescribe("Port forwarding", func() { - framework := NewDefaultFramework("port-forwarding") +var _ = framework.KubeDescribe("Port forwarding", func() { + f := framework.NewDefaultFramework("port-forwarding") - KubeDescribe("With a server that expects a client request", func() { + framework.KubeDescribe("With a server that expects a client request", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("abc", "1", "1", "1") - if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { - Failf("Couldn't create pod: %v", err) + if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + framework.Failf("Couldn't create pod: %v", err) } - if err := framework.WaitForPodRunning(pod.Name); err != nil { - Failf("Pod did not start running: %v", err) + if err := f.WaitForPodRunning(pod.Name); err != nil { + framework.Failf("Pod did not start running: %v", err) } By("Running 'kubectl port-forward'") - cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) + cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - Failf("Couldn't connect to port %d: %v", cmd.port, err) + framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } By("Closing the connection to the local port") conn.Close() By("Waiting for the target pod to stop running") - if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { - Failf("Pod did not stop running: %v", err) + if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil { + framework.Failf("Pod did not stop running: %v", err) } By("Retrieving logs from the target pod") - logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { - Failf("Error retrieving logs: %v", err) + framework.Failf("Error retrieving logs: %v", err) } By("Verifying logs") @@ -197,25 +198,25 @@ var _ = 
KubeDescribe("Port forwarding", func() { It("should support a client that connects, sends data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("abc", "10", "10", "100") - if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { - Failf("Couldn't create pod: %v", err) + if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + framework.Failf("Couldn't create pod: %v", err) } - if err := framework.WaitForPodRunning(pod.Name); err != nil { - Failf("Pod did not start running: %v", err) + if err := f.WaitForPodRunning(pod.Name); err != nil { + framework.Failf("Pod did not start running: %v", err) } By("Running 'kubectl port-forward'") - cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) + cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() By("Dialing the local port") addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - Failf("Error resolving tcp addr: %v", err) + framework.Failf("Error resolving tcp addr: %v", err) } conn, err := net.DialTCP("tcp", nil, addr) if err != nil { - Failf("Couldn't connect to port %d: %v", cmd.port, err) + framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { By("Closing the connection to the local port") @@ -231,22 +232,22 @@ var _ = KubeDescribe("Port forwarding", func() { By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { - Failf("Unexpected error reading data from the server: %v", err) + framework.Failf("Unexpected error reading data from the server: %v", err) } if e, a := strings.Repeat("x", 100), string(fromServer); e != a { - Failf("Expected %q from server, got %q", e, a) + framework.Failf("Expected %q from server, got %q", e, a) } By("Waiting for the target pod to stop running") - if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { - Failf("Pod did not stop running: %v", err) + if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil { + framework.Failf("Pod did not stop running: %v", err) } By("Retrieving logs from the target pod") - logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { - Failf("Error retrieving logs: %v", err) + framework.Failf("Error retrieving logs: %v", err) } By("Verifying logs") @@ -255,25 +256,25 @@ var _ = KubeDescribe("Port forwarding", func() { verifyLogMessage(logOutput, "^Done$") }) }) - KubeDescribe("With a server that expects no client request", func() { + framework.KubeDescribe("With a server that expects no client request", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("", "10", "10", "100") - if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { - Failf("Couldn't create pod: %v", err) + if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + framework.Failf("Couldn't create pod: %v", err) } - if err := framework.WaitForPodRunning(pod.Name); err != nil { - Failf("Pod did not start running: %v", err) + if err := f.WaitForPodRunning(pod.Name); err != nil { + framework.Failf("Pod did not start running: %v", err) } By("Running 'kubectl port-forward'") - cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) + cmd := runPortForward(f.Namespace.Name, pod.Name, 80) 
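
Each of these port-forwarding cases then follows the same client-side shape: dial 127.0.0.1 on the port kubectl picked, optionally send a payload, read everything the pod echoes back, and compare it to the expected string. A minimal sketch of that check; the helper name and the half-close via CloseWrite are assumptions standing in for the elided "sending data" step, not code from the test:

package e2esketch

import (
    "fmt"
    "io/ioutil"
    "net"
    "time"
)

// checkForwardedEcho dials a locally forwarded port, optionally writes a
// payload, reads until the remote side closes the connection, and verifies
// the response matches the expected payload.
func checkForwardedEcho(port int, send, want string) error {
    conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%d", port), 10*time.Second)
    if err != nil {
        return fmt.Errorf("couldn't connect to port %d: %v", port, err)
    }
    defer conn.Close()

    if send != "" {
        if _, err := conn.Write([]byte(send)); err != nil {
            return fmt.Errorf("writing to forwarded port: %v", err)
        }
        // Half-close so the server sees end-of-input and replies (assumed detail).
        if tcp, ok := conn.(*net.TCPConn); ok {
            tcp.CloseWrite()
        }
    }

    fromServer, err := ioutil.ReadAll(conn)
    if err != nil {
        return fmt.Errorf("reading from forwarded port: %v", err)
    }
    if string(fromServer) != want {
        return fmt.Errorf("expected %q from server, got %q", want, fromServer)
    }
    return nil
}
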
defer cmd.Stop() By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - Failf("Couldn't connect to port %d: %v", cmd.port, err) + framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { By("Closing the connection to the local port") @@ -283,22 +284,22 @@ var _ = KubeDescribe("Port forwarding", func() { By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { - Failf("Unexpected error reading data from the server: %v", err) + framework.Failf("Unexpected error reading data from the server: %v", err) } if e, a := strings.Repeat("x", 100), string(fromServer); e != a { - Failf("Expected %q from server, got %q", e, a) + framework.Failf("Expected %q from server, got %q", e, a) } By("Waiting for the target pod to stop running") - if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { - Failf("Pod did not stop running: %v", err) + if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil { + framework.Failf("Pod did not stop running: %v", err) } By("Retrieving logs from the target pod") - logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { - Failf("Error retrieving logs: %v", err) + framework.Failf("Error retrieving logs: %v", err) } By("Verifying logs") @@ -316,5 +317,5 @@ func verifyLogMessage(log, expected string) { return } } - Failf("Missing %q from log: %s", expected, log) + framework.Failf("Missing %q from log: %s", expected, log) } diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go index e200391ab99..e2bfa51bbe6 100644 --- a/test/e2e/pre_stop.go +++ b/test/e2e/pre_stop.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) @@ -51,7 +52,7 @@ func testPreStop(c *client.Client, ns string) { } By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) _, err := c.Pods(ns).Create(podDescr) - expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) + framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. defer func() { @@ -60,13 +61,13 @@ func testPreStop(c *client.Client, ns string) { }() By("Waiting for pods to come up.") - err = waitForPodRunningInNamespace(c, podDescr.Name, ns) - expectNoError(err, "waiting for server pod to start") + err = framework.WaitForPodRunningInNamespace(c, podDescr.Name, ns) + framework.ExpectNoError(err, "waiting for server pod to start") val := "{\"Source\": \"prestop\"}" podOut, err := c.Pods(ns).Get(podDescr.Name) - expectNoError(err, "getting pod info") + framework.ExpectNoError(err, "getting pod info") preStopDescr := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -94,7 +95,7 @@ func testPreStop(c *client.Client, ns string) { By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) _, err = c.Pods(ns).Create(preStopDescr) - expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) + framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true // At the end of the test, clean up by removing the pod. 
@@ -105,19 +106,19 @@ func testPreStop(c *client.Client, ns string) { } }() - err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns) - expectNoError(err, "waiting for tester pod to start") + err = framework.WaitForPodRunningInNamespace(c, preStopDescr.Name, ns) + framework.ExpectNoError(err, "waiting for tester pod to start") // Delete the pod with the preStop handler. By("Deleting pre-stop pod") if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil { deletePreStop = false } - expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) + framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) // Validate that the server received the web poke. err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { - subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, c) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) if err != nil { return false, err } @@ -142,11 +143,11 @@ func testPreStop(c *client.Client, ns string) { if err != nil { By(fmt.Sprintf("Error validating prestop: %v", err)) } else { - Logf("Saw: %s", string(body)) + framework.Logf("Saw: %s", string(body)) state := State{} err := json.Unmarshal(body, &state) if err != nil { - Logf("Error parsing: %v", err) + framework.Logf("Error parsing: %v", err) return false, nil } if state.Received["prestop"] != 0 { @@ -155,11 +156,11 @@ func testPreStop(c *client.Client, ns string) { } return false, nil }) - expectNoError(err, "validating pre-stop.") + framework.ExpectNoError(err, "validating pre-stop.") } -var _ = KubeDescribe("PreStop", func() { - f := NewDefaultFramework("prestop") +var _ = framework.KubeDescribe("PreStop", func() { + f := framework.NewDefaultFramework("prestop") It("should call prestop when killing a pod [Conformance]", func() { testPreStop(f.Client, f.Namespace.Name) diff --git a/test/e2e/privileged.go b/test/e2e/privileged.go index a61189cc288..38104adeea4 100644 --- a/test/e2e/privileged.go +++ b/test/e2e/privileged.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" ) const ( @@ -43,17 +44,17 @@ const ( type PrivilegedPodTestConfig struct { privilegedPod *api.Pod - f *Framework + f *framework.Framework hostExecPod *api.Pod } -var _ = KubeDescribe("PrivilegedPod", func() { - f := NewDefaultFramework("e2e-privilegedpod") +var _ = framework.KubeDescribe("PrivilegedPod", func() { + f := framework.NewDefaultFramework("e2e-privilegedpod") config := &PrivilegedPodTestConfig{ f: f, } It("should test privileged pod", func() { - config.hostExecPod = LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec") + config.hostExecPod = framework.LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec") By("Creating a privileged pod") config.createPrivilegedPod() @@ -69,14 +70,14 @@ var _ = KubeDescribe("PrivilegedPod", func() { func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnPrivilegedContainer() { outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, privilegedHttpPort) if len(outputMap["error"]) > 0 { - Failf("Privileged command failed unexpectedly on privileged container, output:%v", outputMap) + framework.Failf("Privileged command failed unexpectedly on privileged container, output:%v", outputMap) } } func (config *PrivilegedPodTestConfig) 
runPrivilegedCommandOnNonPrivilegedContainer() { outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, notPrivilegedHttpPort) if len(outputMap["error"]) == 0 { - Failf("Privileged command should have failed on non-privileged container, output:%v", outputMap) + framework.Failf("Privileged command should have failed on non-privileged container, output:%v", outputMap) } } @@ -89,11 +90,11 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con v.Encode()) By(fmt.Sprintf("Exec-ing into container over http. Running command:%s", cmd)) - stdout := RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd) + stdout := framework.RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd) var output map[string]string err := json.Unmarshal([]byte(stdout), &output) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) - Logf("Deserialized output is %v", stdout) + framework.Logf("Deserialized output is %v", stdout) return output } @@ -147,12 +148,12 @@ func (config *PrivilegedPodTestConfig) createPrivilegedPod() { func (config *PrivilegedPodTestConfig) createPod(pod *api.Pod) *api.Pod { createdPod, err := config.getPodClient().Create(pod) if err != nil { - Failf("Failed to create %q pod: %v", pod.Name, err) + framework.Failf("Failed to create %q pod: %v", pod.Name, err) } - expectNoError(config.f.WaitForPodRunning(pod.Name)) + framework.ExpectNoError(config.f.WaitForPodRunning(pod.Name)) createdPod, err = config.getPodClient().Get(pod.Name) if err != nil { - Failf("Failed to retrieve %q pod: %v", pod.Name, err) + framework.Failf("Failed to retrieve %q pod: %v", pod.Name, err) } return createdPod } diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go index 9117cbfc835..48267ca95a5 100644 --- a/test/e2e/proxy.go +++ b/test/e2e/proxy.go @@ -29,12 +29,13 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("Proxy", func() { +var _ = framework.KubeDescribe("Proxy", func() { version := testapi.Default.GroupVersion().Version Context("version "+version, func() { proxyContext(version) }) }) @@ -51,7 +52,7 @@ const ( ) func proxyContext(version string) { - f := NewDefaultFramework("proxy") + f := framework.NewDefaultFramework("proxy") prefix := "/api/" + version // Port here has to be kept in sync with default kubelet port. @@ -99,13 +100,13 @@ func proxyContext(version string) { defer func(name string) { err := f.Client.Services(f.Namespace.Name).Delete(name) if err != nil { - Logf("Failed deleting service %v: %v", name, err) + framework.Logf("Failed deleting service %v: %v", name, err) } }(service.Name) // Make an RC with a single pod. 
pods := []*api.Pod{} - cfg := RCConfig{ + cfg := framework.RCConfig{ Client: f.Client, Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Name: service.Name, @@ -141,8 +142,8 @@ func proxyContext(version string) { Labels: labels, CreatedPods: &pods, } - Expect(RunRC(cfg)).NotTo(HaveOccurred()) - defer DeleteRC(f.Client, f.Namespace.Name, cfg.Name) + Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) + defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name) Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred()) @@ -247,7 +248,7 @@ func proxyContext(version string) { }) } -func doProxy(f *Framework, path string) (body []byte, statusCode int, d time.Duration, err error) { +func doProxy(f *framework.Framework, path string) (body []byte, statusCode int, d time.Duration, err error) { // About all of the proxy accesses in this file: // * AbsPath is used because it preserves the trailing '/'. // * Do().Raw() is used (instead of DoRaw()) because it will turn an @@ -258,9 +259,9 @@ func doProxy(f *Framework, path string) (body []byte, statusCode int, d time.Dur body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() d = time.Since(start) if len(body) > 0 { - Logf("%v: %s (%v; %v)", path, truncate(body, maxDisplayBodyLen), statusCode, d) + framework.Logf("%v: %s (%v; %v)", path, truncate(body, maxDisplayBodyLen), statusCode, d) } else { - Logf("%v: %s (%v; %v)", path, "no body", statusCode, d) + framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d) } return } @@ -276,14 +277,14 @@ func truncate(b []byte, maxLen int) []byte { func pickNode(c *client.Client) (string, error) { // TODO: investigate why it doesn't work on master Node. - nodes := ListSchedulableNodesOrDie(c) + nodes := framework.ListSchedulableNodesOrDie(c) if len(nodes.Items) == 0 { return "", fmt.Errorf("no nodes exist, can't test node proxy") } return nodes.Items[0].Name, nil } -func nodeProxyTest(f *Framework, prefix, nodeDest string) { +func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { node, err := pickNode(f.Client) Expect(err).NotTo(HaveOccurred()) // TODO: Change it to test whether all requests succeeded when requests @@ -292,7 +293,7 @@ func nodeProxyTest(f *Framework, prefix, nodeDest string) { for i := 0; i < proxyAttempts; i++ { _, status, d, err := doProxy(f, prefix+node+nodeDest) if status == http.StatusServiceUnavailable { - Logf("Failed proxying node logs due to service unavailable: %v", err) + framework.Logf("Failed proxying node logs due to service unavailable: %v", err) time.Sleep(time.Second) serviceUnavailableErrors++ } else { @@ -302,7 +303,7 @@ func nodeProxyTest(f *Framework, prefix, nodeDest string) { } } if serviceUnavailableErrors > 0 { - Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors) + framework.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors) } maxFailures := int(math.Floor(0.1 * float64(proxyAttempts))) Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures)) diff --git a/test/e2e/rc.go b/test/e2e/rc.go index 182d04513e9..94962766659 100644 --- a/test/e2e/rc.go +++ b/test/e2e/rc.go @@ -24,30 +24,31 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = KubeDescribe("ReplicationController", func() { - framework := NewDefaultFramework("replication-controller") +var _ = framework.KubeDescribe("ReplicationController", func() { + f := framework.NewDefaultFramework("replication-controller") It("should serve a basic image on each replica with a public image [Conformance]", func() { - ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") + ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4") }) It("should serve a basic image on each replica with a private image", func() { // requires private images - SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessProviderIs("gce", "gke") - ServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") + ServeImageOrFail(f, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") }) }) // A basic test to check the deployment of an image using // a replication controller. The image serves its hostname // which is checked for each replica. -func ServeImageOrFail(f *Framework, test string, image string) { +func ServeImageOrFail(f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(util.NewUUID()) replicas := 2 @@ -85,15 +86,15 @@ func ServeImageOrFail(f *Framework, test string, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { - Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) + if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { + framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas) + pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas) By("Ensuring each pod is running") @@ -111,8 +112,8 @@ func ServeImageOrFail(f *Framework, test string, image string) { By("Trying to dial each unique pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second - err = wait.Poll(retryInterval, retryTimeout, podProxyResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { - Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) + framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } } diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go index 0bbddb4de85..af794770c8b 100644 --- a/test/e2e/reboot.go +++ b/test/e2e/reboot.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -45,15 +46,15 @@ const ( rebootPodReadyAgainTimeout = 5 * time.Minute ) -var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { - var f *Framework +var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { + var f *framework.Framework BeforeEach(func() { // These tests requires SSH to nodes, so the provider check should be identical to there - // (the limiting factor is the implementation of util.go's getSigner(...)). + // (the limiting factor is the implementation of util.go's framework.GetSigner(...)). // Cluster must support node reboot - SkipUnlessProviderIs(providersWithSSH...) + framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) }) AfterEach(func() { @@ -66,23 +67,23 @@ var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { Expect(err).NotTo(HaveOccurred()) for _, e := range events.Items { - Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) + framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) } } // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests // make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test // that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that - // was recently rebooted. There's no good way to poll for proxies being closed, so we sleep. + // was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep. // // TODO(cjcullen) reduce this sleep (#19314) - if providerIs("gke") { + if framework.ProviderIs("gke") { By("waiting 5 minutes for all dead tunnels to be dropped") time.Sleep(5 * time.Minute) } }) - f = NewDefaultFramework("reboot") + f = framework.NewDefaultFramework("reboot") It("each node by ordering clean reboot and ensure they function upon restart", func() { // clean shutdown and restart @@ -127,7 +128,7 @@ var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { func testReboot(c *client.Client, rebootCmd string) { // Get all nodes, and kick off the test on each. 
- nodelist := ListSchedulableNodesOrDie(c) + nodelist := framework.ListSchedulableNodesOrDie(c) result := make([]bool, len(nodelist.Items)) wg := sync.WaitGroup{} wg.Add(len(nodelist.Items)) @@ -137,7 +138,7 @@ func testReboot(c *client.Client, rebootCmd string) { go func(ix int) { defer wg.Done() n := nodelist.Items[ix] - result[ix] = rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd) + result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd) if !result[ix] { failed = true } @@ -151,10 +152,10 @@ func testReboot(c *client.Client, rebootCmd string) { for ix := range nodelist.Items { n := nodelist.Items[ix] if !result[ix] { - Logf("Node %s failed reboot test.", n.ObjectMeta.Name) + framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name) } } - Failf("Test failed; at least one node failed to reboot in the time given.") + framework.Failf("Test failed; at least one node failed to reboot in the time given.") } } @@ -165,9 +166,9 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s prefix = "Retrieving log for the last terminated container" } if err != nil { - Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log) + framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log) } else { - Logf("%s %s:\n%s\n", prefix, id, log) + framework.Logf("%s %s:\n%s\n", prefix, id, log) } } podNameSet := sets.NewString(podNames...) @@ -178,14 +179,14 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s if !podNameSet.Has(p.Name) { continue } - if ok, _ := podRunningReady(p); ok { + if ok, _ := framework.PodRunningReady(p); ok { continue } - Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status) + framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status) // Print the log of the containers if pod is not running and ready. for _, container := range p.Status.ContainerStatuses { cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name) - log, err := getPodLogs(c, p.Namespace, p.Name, container.Name) + log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name) printFn(cIdentifer, log, err, false) // Get log from the previous container. if container.RestartCount > 0 { @@ -208,19 +209,19 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s func rebootNode(c *client.Client, provider, name, rebootCmd string) bool { // Setup ns := api.NamespaceSystem - ps := newPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name)) + ps := framework.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name)) defer ps.Stop() // Get the node initially. - Logf("Getting %s", name) + framework.Logf("Getting %s", name) node, err := c.Nodes().Get(name) if err != nil { - Logf("Couldn't get node %s", name) + framework.Logf("Couldn't get node %s", name) return false } // Node sanity check: ensure it is "ready". 
- if !waitForNodeToBeReady(c, name, nodeReadyInitialTimeout) { + if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) { return false } @@ -240,39 +241,39 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool { podNames = append(podNames, p.ObjectMeta.Name) } } - Logf("Node %s has %d pods: %v", name, len(podNames), podNames) + framework.Logf("Node %s has %d pods: %v", name, len(podNames), podNames) // For each pod, we do a sanity check to ensure it's running / healthy // now, as that's what we'll be checking later. - if !checkPodsRunningReady(c, ns, podNames, podReadyBeforeTimeout) { + if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodReadyBeforeTimeout) { printStatusAndLogsForNotReadyPods(c, ns, podNames, pods) return false } // Reboot the node. - if err = issueSSHCommand(rebootCmd, provider, node); err != nil { - Logf("Error while issuing ssh command: %v", err) + if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil { + framework.Logf("Error while issuing ssh command: %v", err) return false } // Wait for some kind of "not ready" status. - if !waitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) { + if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) { return false } // Wait for some kind of "ready" status. - if !waitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) { + if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) { return false } // Ensure all of the pods that we found on this node before the reboot are // running / healthy. - if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) { + if !framework.CheckPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) { newPods := ps.List() printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods) return false } - Logf("Reboot successful on node %s", name) + framework.Logf("Reboot successful on node %s", name) return true } diff --git a/test/e2e/replica_set.go b/test/e2e/replica_set.go index 6e9a06748bd..e8acda5c45e 100644 --- a/test/e2e/replica_set.go +++ b/test/e2e/replica_set.go @@ -26,29 +26,30 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var _ = KubeDescribe("ReplicaSet", func() { - framework := NewDefaultFramework("replicaset") +var _ = framework.KubeDescribe("ReplicaSet", func() { + f := framework.NewDefaultFramework("replicaset") It("should serve a basic image on each replica with a public image [Conformance]", func() { - ReplicaSetServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") + ReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4") }) It("should serve a basic image on each replica with a private image", func() { // requires private images - SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessProviderIs("gce", "gke") - ReplicaSetServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") + ReplicaSetServeImageOrFail(f, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") }) }) // A basic test to check the deployment of an image using a ReplicaSet. The // image serves its hostname which is checked for each replica. 
-func ReplicaSetServeImageOrFail(f *Framework, test string, image string) { +func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) { name := "my-hostname-" + test + "-" + string(util.NewUUID()) replicas := 2 @@ -85,15 +86,15 @@ func ReplicaSetServeImageOrFail(f *Framework, test string, image string) { // Cleanup the ReplicaSet when we are done. defer func() { // Resize the ReplicaSet to zero to get rid of pods. - if err := DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil { - Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err) + if err := framework.DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil { + framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err) } }() // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas) + pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring each pod is running") @@ -112,8 +113,8 @@ func ReplicaSetServeImageOrFail(f *Framework, test string, image string) { By("Trying to dial each unique pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second - err = wait.Poll(retryInterval, retryTimeout, podProxyResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { - Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) + framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } } diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 8e3fca25e67..2ad692f1e4e 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -29,6 +29,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/test/e2e/framework" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/autoscaling" @@ -53,48 +54,48 @@ const ( ) func resizeGroup(size int) error { - if testContext.ReportDir != "" { - CoreDump(testContext.ReportDir) - defer CoreDump(testContext.ReportDir) + if framework.TestContext.ReportDir != "" { + framework.CoreDump(framework.TestContext.ReportDir) + defer framework.CoreDump(framework.TestContext.ReportDir) } - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { // TODO: make this hit the compute API directly instead of shelling out to gcloud. 
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize", - testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--size=%v", size), - "--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone).CombinedOutput() + framework.TestContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--size=%v", size), + "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput() if err != nil { - Logf("Failed to resize node instance group: %v", string(output)) + framework.Logf("Failed to resize node instance group: %v", string(output)) } return err - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { client := autoscaling.New(session.New()) - return awscloud.ResizeInstanceGroup(client, testContext.CloudConfig.NodeInstanceGroup, size) + return awscloud.ResizeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup, size) } else { return fmt.Errorf("Provider does not support InstanceGroups") } } func groupSize() (int, error) { - if testContext.Provider == "gce" || testContext.Provider == "gke" { + if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { // TODO: make this hit the compute API directly instead of shelling out to gcloud. // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", - "list-instances", testContext.CloudConfig.NodeInstanceGroup, "--project="+testContext.CloudConfig.ProjectID, - "--zone="+testContext.CloudConfig.Zone).CombinedOutput() + "list-instances", framework.TestContext.CloudConfig.NodeInstanceGroup, "--project="+framework.TestContext.CloudConfig.ProjectID, + "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput() if err != nil { return -1, err } re := regexp.MustCompile("RUNNING") return len(re.FindAllString(string(output), -1)), nil - } else if testContext.Provider == "aws" { + } else if framework.TestContext.Provider == "aws" { client := autoscaling.New(session.New()) - instanceGroup, err := awscloud.DescribeInstanceGroup(client, testContext.CloudConfig.NodeInstanceGroup) + instanceGroup, err := awscloud.DescribeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup) if err != nil { return -1, fmt.Errorf("error describing instance group: %v", err) } if instanceGroup == nil { - return -1, fmt.Errorf("instance group not found: %s", testContext.CloudConfig.NodeInstanceGroup) + return -1, fmt.Errorf("instance group not found: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) } return instanceGroup.CurrentSize() } else { @@ -107,14 +108,14 @@ func waitForGroupSize(size int) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { currentSize, err := groupSize() if err != nil { - Logf("Failed to get node instance group size: %v", err) + framework.Logf("Failed to get node instance group size: %v", err) continue } if currentSize != size { - Logf("Waiting for node instance group size %d, current size %d", size, currentSize) + framework.Logf("Waiting for node instance group size %d, current size %d", size, currentSize) continue } - Logf("Node instance group has reached the desired size %d", size) + framework.Logf("Node instance group has reached the desired size %d", size) return nil } return 
fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size) @@ -168,9 +169,9 @@ func podOnNode(podName, nodeName string, image string) *api.Pod { func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error { pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage)) if err == nil { - Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) + framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { - Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) + framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) } return err } @@ -239,23 +240,23 @@ func resizeRC(c *client.Client, ns, name string, replicas int) error { func getMaster(c *client.Client) string { master := "" - switch testContext.Provider { + switch framework.TestContext.Provider { case "gce": eps, err := c.Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { - Failf("Fail to get kubernetes endpoinds: %v", err) + framework.Failf("Fail to get kubernetes endpoinds: %v", err) } if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 { - Failf("There are more than 1 endpoints for kubernetes service: %+v", eps) + framework.Failf("There are more than 1 endpoints for kubernetes service: %+v", eps) } master = eps.Subsets[0].Addresses[0].IP case "gke": - master = strings.TrimPrefix(testContext.Host, "https://") + master = strings.TrimPrefix(framework.TestContext.Host, "https://") case "aws": // TODO(justinsb): Avoid hardcoding this. master = "172.20.0.9" default: - Failf("This test is not supported for provider %s and should be disabled", testContext.Provider) + framework.Failf("This test is not supported for provider %s and should be disabled", framework.TestContext.Provider) } return master } @@ -263,7 +264,7 @@ func getMaster(c *client.Client) string { // Return node external IP concatenated with port 22 for ssh // e.g. 1.2.3.4:22 func getNodeExternalIP(node *api.Node) string { - Logf("Getting external IP address for %s", node.Name) + framework.Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { if a.Type == api.NodeExternalIP { @@ -272,7 +273,7 @@ func getNodeExternalIP(node *api.Node) string { } } if host == "" { - Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) + framework.Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) } return host } @@ -294,26 +295,26 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica // had been inserted. (yes, we could look at the error code and ssh error // separately, but I prefer to stay on the safe side). 
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name)) - unblockNetwork(host, master) + framework.UnblockNetwork(host, master) }() - Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) - if !waitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) { - Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) + if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) { + framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } - blockNetwork(host, master) + framework.BlockNetwork(host, master) - Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) - if !waitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) { - Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) + framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) + if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) { + framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) } - Logf("Waiting for pod %s to be removed", podNameToDisappear) - err := waitForRCPodToDisappear(c, ns, rcName, podNameToDisappear) + framework.Logf("Waiting for pod %s to be removed", podNameToDisappear) + err := framework.WaitForRCPodToDisappear(c, ns, rcName, podNameToDisappear) Expect(err).NotTo(HaveOccurred()) By("verifying whether the pod from the unreachable node is recreated") - err = verifyPods(c, ns, rcName, true, replicas) + err = framework.VerifyPods(c, ns, rcName, true, replicas) Expect(err).NotTo(HaveOccurred()) // network traffic is unblocked in a deferred function @@ -326,41 +327,41 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) { for !expected && !timeout { select { case n := <-newNode: - if isNodeConditionSetAsExpected(n, api.NodeReady, isReady) { + if framework.IsNodeConditionSetAsExpected(n, api.NodeReady, isReady) { expected = true } else { - Logf("Observed node ready status is NOT %v as expected", isReady) + framework.Logf("Observed node ready status is NOT %v as expected", isReady) } case <-timer: timeout = true } } if !expected { - Failf("Failed to observe node ready status change to %v", isReady) + framework.Failf("Failed to observe node ready status change to %v", isReady) } } -var _ = KubeDescribe("Nodes [Disruptive]", func() { - framework := NewDefaultFramework("resize-nodes") +var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { + f := framework.NewDefaultFramework("resize-nodes") var systemPodsNo int var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) systemPodsNo = len(systemPods.Items) }) // Slow issue #13323 (8 min) - KubeDescribe("Resize [Slow]", func() { + framework.KubeDescribe("Resize [Slow]", func() { var skipped bool BeforeEach(func() { skipped = true - SkipUnlessProviderIs("gce", "gke", "aws") - SkipUnlessNodeCountIsAtLeast(2) + framework.SkipUnlessProviderIs("gce", "gke", "aws") + 
framework.SkipUnlessNodeCountIsAtLeast(2) skipped = false }) @@ -370,32 +371,32 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { } By("restoring the original node instance group size") - if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil { - Failf("Couldn't restore the original node instance group size: %v", err) + if err := resizeGroup(framework.TestContext.CloudConfig.NumNodes); err != nil { + framework.Failf("Couldn't restore the original node instance group size: %v", err) } // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. // Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs // right before a test that tries to get logs, for example, we may get unlucky and try to use a // closed tunnel to a node that was recently rebooted. There's no good way to poll for proxies // being closed, so we sleep. // // TODO(cjcullen) reduce this sleep (#19314) - if providerIs("gke") { + if framework.ProviderIs("gke") { By("waiting 5 minutes for all dead tunnels to be dropped") time.Sleep(5 * time.Minute) } - if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil { - Failf("Couldn't restore the original node instance group size: %v", err) + if err := waitForGroupSize(framework.TestContext.CloudConfig.NumNodes); err != nil { + framework.Failf("Couldn't restore the original node instance group size: %v", err) } - if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { - Failf("Couldn't restore the original cluster size: %v", err) + if err := framework.WaitForClusterSize(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { + framework.Failf("Couldn't restore the original cluster size: %v", err) } // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. By("waiting for system pods to successfully restart") - err := waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout) + err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout) Expect(err).NotTo(HaveOccurred()) }) @@ -403,9 +404,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { // Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-delete-node" - replicas := testContext.CloudConfig.NumNodes + replicas := framework.TestContext.CloudConfig.NumNodes newRCByName(c, ns, name, replicas) - err := verifyPods(c, ns, name, true, replicas) + err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("decreasing cluster size to %d", replicas-1)) @@ -413,11 +414,11 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { Expect(err).NotTo(HaveOccurred()) err = waitForGroupSize(replicas - 1) Expect(err).NotTo(HaveOccurred()) - err = waitForClusterSize(c, replicas-1, 10*time.Minute) + err = framework.WaitForClusterSize(c, replicas-1, 10*time.Minute) Expect(err).NotTo(HaveOccurred()) By("verifying whether the pods from the removed node are recreated") - err = verifyPods(c, ns, name, true, replicas) + err = framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred()) }) @@ -427,9 +428,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-add-node" newSVCByName(c, ns, name) - replicas := testContext.CloudConfig.NumNodes + replicas := framework.TestContext.CloudConfig.NumNodes newRCByName(c, ns, name, replicas) - err := verifyPods(c, ns, name, true, replicas) + err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("increasing cluster size to %d", replicas+1)) @@ -437,22 +438,22 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { Expect(err).NotTo(HaveOccurred()) err = waitForGroupSize(replicas + 1) Expect(err).NotTo(HaveOccurred()) - err = waitForClusterSize(c, replicas+1, 10*time.Minute) + err = framework.WaitForClusterSize(c, replicas+1, 10*time.Minute) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1)) err = resizeRC(c, ns, name, replicas+1) Expect(err).NotTo(HaveOccurred()) - err = verifyPods(c, ns, name, true, replicas+1) + err = framework.VerifyPods(c, ns, name, true, replicas+1) Expect(err).NotTo(HaveOccurred()) }) }) - KubeDescribe("Network", func() { + framework.KubeDescribe("Network", func() { Context("when a node becomes unreachable", func() { BeforeEach(func() { - SkipUnlessProviderIs("gce", "gke", "aws") - SkipUnlessNodeCountIsAtLeast(2) + framework.SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessNodeCountIsAtLeast(2) }) // TODO marekbiskup 2015-06-19 #10085 @@ -468,9 +469,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-net" newSVCByName(c, ns, name) - replicas := testContext.CloudConfig.NumNodes + replicas := framework.TestContext.CloudConfig.NumNodes newRCByName(c, ns, name, replicas) - err := verifyPods(c, ns, name, true, replicas) + err := framework.VerifyPods(c, ns, name, true, replicas) Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding") By("choose a node with at least one pod - we will block some network traffic on this node") @@ -485,9 +486,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { By(fmt.Sprintf("block network traffic from node %s", node.Name)) performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node) - Logf("Waiting %v for node %s 
to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) - if !waitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { - Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) + if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { + framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } // sleep a bit, to allow Watch in NodeController to catch up. @@ -499,7 +500,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { additionalPod := "additionalpod" err = newPodOnNode(c, ns, additionalPod, node.Name) Expect(err).NotTo(HaveOccurred()) - err = verifyPods(c, ns, additionalPod, true, 1) + err = framework.VerifyPods(c, ns, additionalPod, true, 1) Expect(err).NotTo(HaveOccurred()) // verify that it is really on the requested node @@ -507,7 +508,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { pod, err := c.Pods(ns).Get(additionalPod) Expect(err).NotTo(HaveOccurred()) if pod.Spec.NodeName != node.Name { - Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) + framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) } } }) @@ -525,8 +526,8 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { nodeOpts := api.ListOptions{} nodes, err := c.Nodes().List(nodeOpts) Expect(err).NotTo(HaveOccurred()) - filterNodes(nodes, func(node api.Node) bool { - if !isNodeConditionSetAsExpected(&node, api.NodeReady, true) { + framework.FilterNodes(nodes, func(node api.Node) bool { + if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) { return false } podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} @@ -537,12 +538,12 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { return true }) if len(nodes.Items) <= 0 { - Failf("No eligible node were found: %d", len(nodes.Items)) + framework.Failf("No eligible node were found: %d", len(nodes.Items)) } node := nodes.Items[0] podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} - if err = waitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, podRunningReady); err != nil { - Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) + if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, framework.PodRunningReady); err != nil { + framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } By("Set up watch on node status") @@ -554,11 +555,11 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector - return framework.Client.Nodes().List(options) + return f.Client.Nodes().List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector - return framework.Client.Nodes().Watch(options) + return f.Client.Nodes().Watch(options) }, }, &api.Node{}, @@ -585,7 +586,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { master := getMaster(c) defer func() { By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) - unblockNetwork(host, master) + framework.UnblockNetwork(host, master) if 
CurrentGinkgoTestDescription().Failed { return @@ -593,17 +594,17 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() { By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") expectNodeReadiness(true, newNode) - if err = waitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, podRunningReady); err != nil { - Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) + if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, framework.PodRunningReady); err != nil { + framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) } }() - blockNetwork(host, master) + framework.BlockNetwork(host, master) By("Expect to observe node and pod status change from Ready to NotReady after network partition") expectNodeReadiness(false, newNode) - if err = waitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, podNotReady); err != nil { - Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) + if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, framework.PodNotReady); err != nil { + framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) } }) }) diff --git a/test/e2e/resource_quota.go b/test/e2e/resource_quota.go index 90c4cc125c5..6831a40b798 100644 --- a/test/e2e/resource_quota.go +++ b/test/e2e/resource_quota.go @@ -25,6 +25,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -35,8 +36,8 @@ const ( resourceQuotaTimeout = 30 * time.Second ) -var _ = KubeDescribe("ResourceQuota", func() { - f := NewDefaultFramework("resourcequota") +var _ = framework.KubeDescribe("ResourceQuota", func() { + f := framework.NewDefaultFramework("resourcequota") It("should create a ResourceQuota and ensure its status is promptly calculated.", func() { By("Creating a ResourceQuota") @@ -712,7 +713,7 @@ func deleteResourceQuota(c *client.Client, namespace, name string) error { // wait for resource quota status to show the expected used resources value func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error { - return wait.Poll(poll, resourceQuotaTimeout, func() (bool, error) { + return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName) if err != nil { return false, err @@ -724,7 +725,7 @@ func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.Resou // verify that the quota shows the expected used resource values for k, v := range used { if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) { - Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String()) + framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String()) return false, nil } } diff --git a/test/e2e/restart.go b/test/e2e/restart.go index 656e8b4e0b2..916306ff4ca 100644 --- a/test/e2e/restart.go +++ b/test/e2e/restart.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -36,7 +37,7 @@ const ( // restart all nodes will be this number times the number of nodes.) restartPerNodeTimeout = 5 * time.Minute - // How often to poll the statues of a restart. + // How often to framework.Poll the statues of a restart. restartPoll = 20 * time.Second // How long a node is allowed to become "Ready" after it is restarted before @@ -48,16 +49,16 @@ const ( restartPodReadyAgainTimeout = 5 * time.Minute ) -var _ = KubeDescribe("Restart [Disruptive]", func() { - f := NewDefaultFramework("restart") - var ps *podStore +var _ = framework.KubeDescribe("Restart [Disruptive]", func() { + f := framework.NewDefaultFramework("restart") + var ps *framework.PodStore BeforeEach(func() { // This test requires the ability to restart all nodes, so the provider // check must be identical to that call. - SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessProviderIs("gce", "gke") - ps = newPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything()) + ps = framework.NewPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything()) }) AfterEach(func() { @@ -67,12 +68,12 @@ var _ = KubeDescribe("Restart [Disruptive]", func() { }) It("should restart all nodes and ensure all nodes and pods recover", func() { - nn := testContext.CloudConfig.NumNodes + nn := framework.TestContext.CloudConfig.NumNodes By("ensuring all nodes are ready") - nodeNamesBefore, err := checkNodesReady(f.Client, nodeReadyInitialTimeout, nn) + nodeNamesBefore, err := checkNodesReady(f.Client, framework.NodeReadyInitialTimeout, nn) Expect(err).NotTo(HaveOccurred()) - Logf("Got the following nodes before restart: %v", nodeNamesBefore) + framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore) By("ensuring all pods are running and ready") pods := ps.List() @@ -81,24 +82,24 @@ var _ = KubeDescribe("Restart [Disruptive]", func() { podNamesBefore[i] = p.ObjectMeta.Name } ns := api.NamespaceSystem - if !checkPodsRunningReady(f.Client, ns, podNamesBefore, podReadyBeforeTimeout) { - Failf("At least one pod wasn't running and ready at test start.") + if !framework.CheckPodsRunningReady(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) { + framework.Failf("At least one pod wasn't running and ready at test start.") } By("restarting all of the nodes") - err = restartNodes(testContext.Provider, restartPerNodeTimeout) + err = restartNodes(framework.TestContext.Provider, restartPerNodeTimeout) Expect(err).NotTo(HaveOccurred()) By("ensuring all nodes are ready after the restart") nodeNamesAfter, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, nn) Expect(err).NotTo(HaveOccurred()) - Logf("Got the following nodes after restart: %v", nodeNamesAfter) + framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter) // Make sure that we have the same number of nodes. We're not checking // that the names match because that's implementation specific. 
By("ensuring the same number of nodes exist after the restart") if len(nodeNamesBefore) != len(nodeNamesAfter) { - Failf("Had %d nodes before nodes were restarted, but now only have %d", + framework.Failf("Had %d nodes before nodes were restarted, but now only have %d", len(nodeNamesBefore), len(nodeNamesAfter)) } @@ -110,23 +111,23 @@ var _ = KubeDescribe("Restart [Disruptive]", func() { podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout) Expect(err).NotTo(HaveOccurred()) remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart) - if !checkPodsRunningReady(f.Client, ns, podNamesAfter, remaining) { - Failf("At least one pod wasn't running and ready after the restart.") + if !framework.CheckPodsRunningReady(f.Client, ns, podNamesAfter, remaining) { + framework.Failf("At least one pod wasn't running and ready after the restart.") } }) }) // waitForNPods tries to list pods using c until it finds expect of them, // returning their names if it can do so before timeout. -func waitForNPods(ps *podStore, expect int, timeout time.Duration) ([]string, error) { +func waitForNPods(ps *framework.PodStore, expect int, timeout time.Duration) ([]string, error) { // Loop until we find expect pods or timeout is passed. var pods []*api.Pod var errLast error - found := wait.Poll(poll, timeout, func() (bool, error) { + found := wait.Poll(framework.Poll, timeout, func() (bool, error) { pods = ps.List() if len(pods) != expect { errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) - Logf("Error getting pods: %v", errLast) + framework.Logf("Error getting pods: %v", errLast) return false, nil } return true, nil @@ -151,7 +152,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, var nodeList *api.NodeList var errLast error start := time.Now() - found := wait.Poll(poll, nt, func() (bool, error) { + found := wait.Poll(framework.Poll, nt, func() (bool, error) { // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // knows about all of the nodes. Thus, we retry the list nodes call // until we get the expected number of nodes. @@ -163,7 +164,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, if len(nodeList.Items) != expect { errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)", expect, len(nodeList.Items), time.Since(start)) - Logf("%v", errLast) + framework.Logf("%v", errLast) return false, nil } return true, nil @@ -176,7 +177,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v", expect, nt, errLast) } - Logf("Successfully found %d nodes", expect) + framework.Logf("Successfully found %d nodes", expect) // Next, ensure in parallel that all the nodes are ready. We subtract the // time we spent waiting above. 
@@ -184,7 +185,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, result := make(chan bool, len(nodeList.Items)) for _, n := range nodeNames { n := n - go func() { result <- waitForNodeToBeReady(c, n, timeout) }() + go func() { result <- framework.WaitForNodeToBeReady(c, n, timeout) }() } failed := false // TODO(mbforbes): Change to `for range` syntax once we support only Go diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index 93479d567fa..6a094de484f 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/system" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -63,7 +64,7 @@ func getRequestedCPU(pod api.Pod) int64 { func verifyResult(c *client.Client, podName string, ns string) { allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) scheduledPods, notScheduledPods := getPodsScheduled(allPods) selector := fields.Set{ @@ -75,7 +76,7 @@ func verifyResult(c *client.Client, podName string, ns string) { }.AsSelector() options := api.ListOptions{FieldSelector: selector} schedEvents, err := c.Events(ns).List(options) - expectNoError(err) + framework.ExpectNoError(err) // If we failed to find event with a capitalized first letter of reason // try looking for one starting with a small one for backward compatibility. // If we don't do it we end up in #15806. @@ -90,7 +91,7 @@ func verifyResult(c *client.Client, podName string, ns string) { }.AsSelector() options := api.ListOptions{FieldSelector: selector} schedEvents, err = c.Events(ns).List(options) - expectNoError(err) + framework.ExpectNoError(err) } printed := false @@ -110,10 +111,10 @@ func verifyResult(c *client.Client, podName string, ns string) { func cleanupPods(c *client.Client, ns string) { By("Removing all pods in namespace " + ns) pods, err := c.Pods(ns).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) opt := api.NewDeleteOptions(0) for _, p := range pods.Items { - expectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt)) + framework.ExpectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt)) } } @@ -123,24 +124,24 @@ func waitForStableCluster(c *client.Client) int { startTime := time.Now() allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods) for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods) if startTime.Add(timeout).Before(time.Now()) { - Failf("Timed out after %v waiting for stable cluster.", timeout) + framework.Failf("Timed out after %v waiting for stable cluster.", timeout) break } } return len(scheduledPods) } -var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { +var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { var c *client.Client var nodeList *api.NodeList var systemPodsNo int @@ -152,16 +153,16 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { rc, err := c.ReplicationControllers(ns).Get(RCName) if err == nil && rc.Spec.Replicas != 0 { By("Cleaning up the replication controller") - err := DeleteRC(c, 
ns, RCName) - expectNoError(err) + err := framework.DeleteRC(c, ns, RCName) + framework.ExpectNoError(err) } }) - framework := NewDefaultFramework("sched-pred") + f := framework.NewDefaultFramework("sched-pred") BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name nodeList = &api.NodeList{} nodes, err := c.Nodes().List(api.ListOptions{}) masterNodes = sets.NewString() @@ -173,8 +174,8 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { } } - err = checkTestingNSDeletedExcept(c, ns) - expectNoError(err) + err = framework.CheckTestingNSDeletedExcept(c, ns) + framework.ExpectNoError(err) // Every test case in this suite assumes that cluster add-on pods stay stable and // cannot be run in parallel with any other test that touches Nodes or Pods. @@ -188,12 +189,12 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { } } - err = waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout) + err = framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeList.Items { - Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) - PrintAllKubeletPods(c, node.Name) + framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) + framework.PrintAllKubeletPods(c, node.Name) } }) @@ -207,7 +208,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { totalPodCapacity = 0 for _, node := range nodeList.Items { - Logf("Node: %v", node) + framework.Logf("Node: %v", node) podCapacity, found := node.Status.Capacity["pods"] Expect(found).To(Equal(true)) totalPodCapacity += podCapacity.Value() @@ -218,7 +219,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) - startPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{ + framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -254,10 +255,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) // Wait a bit to allow scheduler to do its thing // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. 
- Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") time.Sleep(10 * time.Second) verifyResult(c, podName, ns) @@ -277,11 +278,11 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { waitForStableCluster(c) pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) - expectNoError(err) + framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToCapacityMap[pod.Spec.NodeName] if found && pod.Status.Phase == api.PodRunning { - Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) + framework.Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod) } } @@ -289,13 +290,13 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { var podsNeededForSaturation int milliCpuPerPod := int64(500) for name, leftCapacity := range nodeToCapacityMap { - Logf("Node: %v has capacity: %v", name, leftCapacity) + framework.Logf("Node: %v has capacity: %v", name, leftCapacity) podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod) } By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation)) - startPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{ + framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -344,10 +345,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) // Wait a bit to allow scheduler to do its thing // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. - Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") time.Sleep(10 * time.Second) verifyResult(c, podName, ns) @@ -382,10 +383,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) // Wait a bit to allow scheduler to do its thing // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. - Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") time.Sleep(10 * time.Second) verifyResult(c, podName, ns) @@ -424,12 +425,12 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }) if err == nil || !errors.IsInvalid(err) { - Failf("Expect error of invalid, got : %v", err) + framework.Failf("Expect error of invalid, got : %v", err) } // Wait a bit to allow scheduler to do its thing if the pod is not rejected. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. 
- Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") time.Sleep(10 * time.Second) cleanupPods(c, ns) @@ -458,24 +459,24 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) - expectNoError(waitForPodRunningInNamespace(c, podName, ns)) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) pod, err := c.Pods(ns).Get(podName) - expectNoError(err) + framework.ExpectNoError(err) nodeName := pod.Spec.NodeName err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) - expectNoError(err) + framework.ExpectNoError(err) By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID())) v := "42" patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() - expectNoError(err) + framework.ExpectNoError(err) node, err := c.Nodes().Get(nodeName) - expectNoError(err) + framework.ExpectNoError(err) Expect(node.Labels[k]).To(Equal(v)) By("Trying to relaunch the pod, now with labels.") @@ -500,7 +501,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) // check that pod got scheduled. We intentionally DO NOT check that the @@ -508,9 +509,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - expectNoError(waitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) labelPod, err := c.Pods(ns).Get(labelPodName) - expectNoError(err) + framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -560,10 +561,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) // Wait a bit to allow scheduler to do its thing // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. 
- Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") time.Sleep(10 * time.Second) verifyResult(c, podName, ns) @@ -595,24 +596,24 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) - expectNoError(waitForPodRunningInNamespace(c, podName, ns)) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) pod, err := c.Pods(ns).Get(podName) - expectNoError(err) + framework.ExpectNoError(err) nodeName := pod.Spec.NodeName err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) - expectNoError(err) + framework.ExpectNoError(err) By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID())) v := "42" patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() - expectNoError(err) + framework.ExpectNoError(err) node, err := c.Nodes().Get(nodeName) - expectNoError(err) + framework.ExpectNoError(err) Expect(node.Labels[k]).To(Equal(v)) By("Trying to relaunch the pod, now with labels.") @@ -651,7 +652,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) + framework.ExpectNoError(err) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) // check that pod got scheduled. We intentionally DO NOT check that the @@ -659,9 +660,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. 
- expectNoError(waitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) labelPod, err := c.Pods(ns).Get(labelPodName) - expectNoError(err) + framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -689,31 +690,31 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, }) - expectNoError(err) - expectNoError(waitForPodRunningInNamespace(c, podName, ns)) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) pod, err := c.Pods(ns).Get(podName) - expectNoError(err) + framework.ExpectNoError(err) nodeName := pod.Spec.NodeName err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) - expectNoError(err) + framework.ExpectNoError(err) By("Trying to apply a label with fake az info on the found node.") k := "kubernetes.io/e2e-az-name" v := "e2e-az1" patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() - expectNoError(err) + framework.ExpectNoError(err) node, err := c.Nodes().Get(nodeName) - expectNoError(err) + framework.ExpectNoError(err) Expect(node.Labels[k]).To(Equal(v)) By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.") labelPodName := "with-labels" - nodeSelectionRoot := filepath.Join(testContext.RepoRoot, "docs/user-guide/node-selection") + nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/node-selection") testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml") - runKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns)) + framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns)) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) // check that pod got scheduled. We intentionally DO NOT check that the @@ -721,9 +722,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - expectNoError(waitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) labelPod, err := c.Pods(ns).Get(labelPodName) - expectNoError(err) + framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) }) diff --git a/test/e2e/secrets.go b/test/e2e/secrets.go index cb915efaf62..dcbf3ac189e 100644 --- a/test/e2e/secrets.go +++ b/test/e2e/secrets.go @@ -21,12 +21,13 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" ) -var _ = KubeDescribe("Secrets", func() { - f := NewDefaultFramework("secrets") +var _ = framework.KubeDescribe("Secrets", func() { + f := framework.NewDefaultFramework("secrets") It("should be consumable from pods in volume [Conformance]", func() { name := "secret-test-" + string(util.NewUUID()) @@ -49,12 +50,12 @@ var _ = KubeDescribe("Secrets", func() { defer func() { By("Cleaning up the secret") if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { - Failf("unable to delete secret %v: %v", secret.Name, err) + framework.Failf("unable to delete secret %v: %v", secret.Name, err) } }() var err error if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { - Failf("unable to create test secret %s: %v", secret.Name, err) + framework.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &api.Pod{ @@ -92,7 +93,7 @@ var _ = KubeDescribe("Secrets", func() { }, } - testContainerOutput("consume secrets", f.Client, pod, 0, []string{ + framework.TestContainerOutput("consume secrets", f.Client, pod, 0, []string{ "content of file \"/etc/secret-volume/data-1\": value-1", "mode of file \"/etc/secret-volume/data-1\": -r--r--r--", }, f.Namespace.Name) @@ -115,12 +116,12 @@ var _ = KubeDescribe("Secrets", func() { defer func() { By("Cleaning up the secret") if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { - Failf("unable to delete secret %v: %v", secret.Name, err) + framework.Failf("unable to delete secret %v: %v", secret.Name, err) } }() var err error if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { - Failf("unable to create test secret %s: %v", secret.Name, err) + framework.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &api.Pod{ @@ -152,7 +153,7 @@ var _ = KubeDescribe("Secrets", func() { }, } - testContainerOutput("consume secrets", f.Client, pod, 0, []string{ + framework.TestContainerOutput("consume secrets", f.Client, pod, 0, []string{ "SECRET_DATA=value-1", }, f.Namespace.Name) }) diff --git a/test/e2e/security_context.go b/test/e2e/security_context.go index a6ec2fbdc90..a0d41b5f3f5 100644 --- a/test/e2e/security_context.go +++ b/test/e2e/security_context.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -57,15 +58,15 @@ func scTestPod(hostIPC bool, hostPID bool) *api.Pod { return pod } -var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() { - framework := NewDefaultFramework("security-context") +var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", func() { + f := framework.NewDefaultFramework("security-context") It("should support pod.Spec.SecurityContext.SupplementalGroups", func() { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} groups := []string{"1234", "5678"} - framework.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) + f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) }) It("should support pod.Spec.SecurityContext.RunAsUser", func() { @@ -74,7 +75,7 @@ var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() { pod.Spec.SecurityContext.RunAsUser = &uid pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"} - framework.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("%v", uid), }) }) @@ -88,26 +89,26 @@ var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() { pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"} - framework.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("%v", overrideUid), }) }) It("should support volume SELinux relabeling", func() { - testPodSELinuxLabeling(framework, false, false) + testPodSELinuxLabeling(f, false, false) }) It("should support volume SELinux relabeling when using hostIPC", func() { - testPodSELinuxLabeling(framework, true, false) + testPodSELinuxLabeling(f, true, false) }) It("should support volume SELinux relabeling when using hostPID", func() { - testPodSELinuxLabeling(framework, false, true) + testPodSELinuxLabeling(f, false, true) }) }) -func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) { +func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) { // Write and read a file with an empty_dir volume // with a pod with the MCS label s0:c0,c1 pod := scTestPod(hostIPC, hostPID) @@ -134,28 +135,28 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) { } pod.Spec.Containers[0].Command = []string{"sleep", "6000"} - client := framework.Client.Pods(framework.Namespace.Name) + client := f.Client.Pods(f.Namespace.Name) _, err := client.Create(pod) - expectNoError(err, "Error creating pod %v", pod) + framework.ExpectNoError(err, "Error creating pod %v", pod) defer client.Delete(pod.Name, nil) - expectNoError(waitForPodRunningInNamespace(framework.Client, pod.Name, framework.Namespace.Name)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) testContent := "hello" testFilePath := mountPath + "/TEST" - err = framework.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) + err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) Expect(err).To(BeNil()) - content, err := framework.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) + content, 
err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) Expect(err).To(BeNil()) Expect(content).To(ContainSubstring(testContent)) - foundPod, err := framework.Client.Pods(framework.Namespace.Name).Get(pod.Name) + foundPod, err := f.Client.Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) // Confirm that the file can be accessed from a second // pod using host_path with the same MCS label - volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", testContext.KubeVolumeDir, foundPod.UID, volumeName) - By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", testContext.KubeVolumeDir)) + volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName) + By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir)) pod = scTestPod(hostIPC, hostPID) pod.Spec.NodeName = foundPod.Spec.NodeName volumeMounts := []api.VolumeMount{ @@ -181,7 +182,7 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) { Level: "s0:c0,c1", } - framework.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent}) + f.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent}) // Confirm that the same pod with a different MCS // label cannot access the volume pod = scTestPod(hostIPC, hostPID) @@ -192,12 +193,12 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) { Level: "s0:c2,c3", } _, err = client.Create(pod) - expectNoError(err, "Error creating pod %v", pod) + framework.ExpectNoError(err, "Error creating pod %v", pod) defer client.Delete(pod.Name, nil) - err = framework.WaitForPodRunning(pod.Name) - expectNoError(err, "Error waiting for pod to run %v", pod) + err = f.WaitForPodRunning(pod.Name) + framework.ExpectNoError(err, "Error waiting for pod to run %v", pod) - content, err = framework.ReadFileViaContainer(pod.Name, "test-container", testFilePath) + content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath) Expect(content).NotTo(ContainSubstring(testContent)) } diff --git a/test/e2e/service.go b/test/e2e/service.go index cd55308fd8a..d98e91f3d8b 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -39,6 +39,7 @@ import ( utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" ) // Maximum time a kube-proxy daemon on a node is allowed to not @@ -61,14 +62,14 @@ const loadBalancerCreateTimeout = 20 * time.Minute // This should match whatever the default/configured range is var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} -var _ = KubeDescribe("Services", func() { - f := NewDefaultFramework("services") +var _ = framework.KubeDescribe("Services", func() { + f := framework.NewDefaultFramework("services") var c *client.Client BeforeEach(func() { var err error - c, err = loadClient() + c, err = framework.LoadClient() Expect(err).NotTo(HaveOccurred()) }) @@ -225,8 +226,8 @@ var _ = KubeDescribe("Services", func() { It("should be able to up and down services", func() { // TODO: use the ServiceTestJig here - // this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP - SkipUnlessProviderIs(providersWithSSH...) 
+ // this test uses framework.NodeSSHHosts that does not work if a Node only reports LegacyHostIP + framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) ns := f.Namespace.Name numPods, servicePort := 3, 80 @@ -237,27 +238,27 @@ var _ = KubeDescribe("Services", func() { podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - hosts, err := NodeSSHHosts(c) + hosts, err := framework.NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { - Failf("No ssh-able nodes") + framework.Failf("No ssh-able nodes") } host := hosts[0] By("verifying service1 is up") - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) By("verifying service2 is up") - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. By("stopping service1") - expectNoError(stopServeHostnameService(c, ns, "service1")) + framework.ExpectNoError(stopServeHostnameService(c, ns, "service1")) By("verifying service1 is not up") - expectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort)) By("verifying service2 is still up") - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. By("creating service3 in namespace " + ns) @@ -265,19 +266,19 @@ var _ = KubeDescribe("Services", func() { Expect(err).NotTo(HaveOccurred()) if svc2IP == svc3IP { - Failf("service IPs conflict: %v", svc2IP) + framework.Failf("service IPs conflict: %v", svc2IP) } By("verifying service2 is still up") - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("verifying service3 is up") - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) }) It("should work after restarting kube-proxy [Disruptive]", func() { // TODO: use the ServiceTestJig here - SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessProviderIs("gce", "gke") ns := f.Namespace.Name numPods, servicePort := 3, 80 @@ -285,88 +286,88 @@ var _ = KubeDescribe("Services", func() { svc1 := "service1" svc2 := "service2" - defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }() + defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc1)) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }() + defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc2)) }() podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { - Failf("VIPs conflict: %v", svc1IP) + framework.Failf("VIPs conflict: %v", svc1IP) } - hosts, err := NodeSSHHosts(c) + hosts, err := framework.NodeSSHHosts(c) 
Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { - Failf("No ssh-able nodes") + framework.Failf("No ssh-able nodes") } host := hosts[0] - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Restarting kube-proxy") - if err := restartKubeProxy(host); err != nil { - Failf("error restarting kube-proxy: %v", err) + if err := framework.RestartKubeProxy(host); err != nil { + framework.Failf("error restarting kube-proxy: %v", err) } - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Removing iptable rules") - result, err := SSH(` + result, err := framework.SSH(` sudo iptables -t nat -F KUBE-SERVICES || true; sudo iptables -t nat -F KUBE-PORTALS-HOST || true; - sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider) + sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { - LogSSHResult(result) - Failf("couldn't remove iptable rules: %v", err) + framework.LogSSHResult(result) + framework.Failf("couldn't remove iptable rules: %v", err) } - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) }) It("should work after restarting apiserver [Disruptive]", func() { // TODO: use the ServiceTestJig here - // TODO: restartApiserver doesn't work in GKE - fix it and reenable this test. - SkipUnlessProviderIs("gce") + // TODO: framework.RestartApiserver doesn't work in GKE - fix it and reenable this test. 
+ framework.SkipUnlessProviderIs("gce") ns := f.Namespace.Name numPods, servicePort := 3, 80 - defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }() + defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service1")) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - hosts, err := NodeSSHHosts(c) + hosts, err := framework.NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { - Failf("No ssh-able nodes") + framework.Failf("No ssh-able nodes") } host := hosts[0] - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver - if err := restartApiserver(); err != nil { - Failf("error restarting apiserver: %v", err) + if err := framework.RestartApiserver(); err != nil { + framework.Failf("error restarting apiserver: %v", err) } - if err := waitForApiserverUp(c); err != nil { - Failf("error while waiting for apiserver up: %v", err) + if err := framework.WaitForApiserverUp(c); err != nil { + framework.Failf("error while waiting for apiserver up: %v", err) } - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. - defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }() + defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service2")) }() podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { - Failf("VIPs conflict: %v", svc1IP) + framework.Failf("VIPs conflict: %v", svc1IP) } - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) }) // TODO: Run this test against the userspace proxy and nodes @@ -393,24 +394,24 @@ var _ = KubeDescribe("Services", func() { jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout) By("verifying the node port is locked") - hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) - stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { - Failf("expected node port %d to be in use, stdout: %v. err: %v", nodePort, stdout, err) + framework.Failf("expected node port %d to be in use, stdout: %v. 
err: %v", nodePort, stdout, err) } }) It("should be able to change the type and ports of a service [Slow]", func() { // requires cloud load-balancer support - SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessProviderIs("gce", "gke", "aws") - loadBalancerSupportsUDP := !providerIs("aws") + loadBalancerSupportsUDP := !framework.ProviderIs("aws") loadBalancerLagTimeout := loadBalancerLagTimeoutDefault - if providerIs("aws") { + if framework.ProviderIs("aws") { loadBalancerLagTimeout = loadBalancerLagTimeoutAWS } @@ -419,13 +420,13 @@ var _ = KubeDescribe("Services", func() { serviceName := "mutability-test" ns1 := f.Namespace.Name // LB1 in ns1 on TCP - Logf("namespace for TCP test: %s", ns1) + framework.Logf("namespace for TCP test: %s", ns1) By("creating a second namespace") namespacePtr, err := f.CreateNamespace("services", nil) Expect(err).NotTo(HaveOccurred()) ns2 := namespacePtr.Name // LB2 in ns2 on UDP - Logf("namespace for UDP test: %s", ns2) + framework.Logf("namespace for UDP test: %s", ns2) jig := NewServiceTestJig(c, serviceName) nodeIP := pickNodeIP(jig.Client) // for later @@ -443,10 +444,10 @@ var _ = KubeDescribe("Services", func() { By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { - Failf("expected to use the same port for TCP and UDP") + framework.Failf("expected to use the same port for TCP and UDP") } svcPort := tcpService.Spec.Ports[0].Port - Logf("service port (TCP and UDP): %d", svcPort) + framework.Logf("service port (TCP and UDP): %d", svcPort) By("creating a pod to be part of the TCP service " + serviceName) jig.RunOrFail(ns1, nil) @@ -462,7 +463,7 @@ var _ = KubeDescribe("Services", func() { }) jig.SanityCheckService(tcpService, api.ServiceTypeNodePort) tcpNodePort := tcpService.Spec.Ports[0].NodePort - Logf("TCP node port: %d", tcpNodePort) + framework.Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service to type=NodePort") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { @@ -470,7 +471,7 @@ var _ = KubeDescribe("Services", func() { }) jig.SanityCheckService(udpService, api.ServiceTypeNodePort) udpNodePort := udpService.Spec.Ports[0].NodePort - Logf("UDP node port: %d", udpNodePort) + framework.Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) @@ -482,20 +483,20 @@ var _ = KubeDescribe("Services", func() { requestedIP := "" staticIPName := "" - if providerIs("gce", "gke") { + if framework.ProviderIs("gce", "gke") { By("creating a static load balancer IP") - staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", runId) + staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId) requestedIP, err = createGCEStaticIP(staticIPName) Expect(err).NotTo(HaveOccurred()) defer func() { if staticIPName != "" { // Release GCE static IP - this is not kube-managed and will not be automatically released. 
if err := deleteGCEStaticIP(staticIPName); err != nil { - Logf("failed to release static IP %s: %v", staticIPName, err) + framework.Logf("failed to release static IP %s: %v", staticIPName, err) } } }() - Logf("Allocated static load balancer IP: %s", requestedIP) + framework.Logf("Allocated static load balancer IP: %s", requestedIP) } By("changing the TCP service to type=LoadBalancer") @@ -516,15 +517,15 @@ var _ = KubeDescribe("Services", func() { tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) if tcpService.Spec.Ports[0].NodePort != tcpNodePort { - Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) + framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) } if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { - Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) - Logf("TCP load balancer: %s", tcpIngressIP) + framework.Logf("TCP load balancer: %s", tcpIngressIP) - if providerIs("gce", "gke") { + if framework.ProviderIs("gce", "gke") { // Do this as early as possible, which overrides the `defer` above. // This is mostly out of fear of leaking the IP in a timeout case // (as of this writing we're not 100% sure where the leaks are @@ -534,7 +535,7 @@ var _ = KubeDescribe("Services", func() { // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. 
if err := deleteGCEStaticIP(staticIPName); err != nil { - Failf("failed to release static IP %s: %v", staticIPName, err) + framework.Failf("failed to release static IP %s: %v", staticIPName, err) } staticIPName = "" } @@ -547,14 +548,14 @@ var _ = KubeDescribe("Services", func() { udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) if udpService.Spec.Ports[0].NodePort != udpNodePort { - Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) + framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) } udpIngressIP = getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) - Logf("UDP load balancer: %s", udpIngressIP) + framework.Logf("UDP load balancer: %s", udpIngressIP) By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { - Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } @@ -580,12 +581,12 @@ var _ = KubeDescribe("Services", func() { tcpNodePortOld := tcpNodePort tcpNodePort = tcpService.Spec.Ports[0].NodePort if tcpNodePort == tcpNodePortOld { - Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) + framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } - Logf("TCP node port: %d", tcpNodePort) + framework.Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service's NodePort") udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort) @@ -597,12 +598,12 @@ var _ = KubeDescribe("Services", func() { udpNodePortOld := udpNodePort udpNodePort = udpService.Spec.Ports[0].NodePort if udpNodePort == udpNodePortOld { - Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) + framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } - Logf("UDP node port: %d", udpNodePort) + framework.Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's new NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) @@ -634,13 +635,13 @@ var _ = KubeDescribe("Services", func() { svcPortOld := svcPort svcPort = tcpService.Spec.Ports[0].Port if svcPort == svcPortOld { - Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) + framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) } if tcpService.Spec.Ports[0].NodePort != tcpNodePort { - Failf("TCP Spec.Ports[0].NodePort (%d) changed", 
tcpService.Spec.Ports[0].NodePort) + framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } By("changing the UDP service's port") @@ -653,16 +654,16 @@ var _ = KubeDescribe("Services", func() { jig.SanityCheckService(udpService, api.ServiceTypeNodePort) } if udpService.Spec.Ports[0].Port != svcPort { - Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) + framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) } if udpService.Spec.Ports[0].NodePort != udpNodePort { - Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) + framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } - Logf("service port (TCP and UDP): %d", svcPort) + framework.Logf("service port (TCP and UDP): %d", svcPort) By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) @@ -727,7 +728,7 @@ var _ = KubeDescribe("Services", func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - Failf("errors in cleanup: %v", errs) + framework.Failf("errors in cleanup: %v", errs) } }() @@ -738,14 +739,14 @@ var _ = KubeDescribe("Services", func() { Expect(err).NotTo(HaveOccurred()) if result.Spec.Type != api.ServiceTypeNodePort { - Failf("got unexpected Spec.Type for new service: %v", result) + framework.Failf("got unexpected Spec.Type for new service: %v", result) } if len(result.Spec.Ports) != 1 { - Failf("got unexpected len(Spec.Ports) for new service: %v", result) + framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) } port := result.Spec.Ports[0] if port.NodePort == 0 { - Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result) + framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result) } By("creating service " + serviceName2 + " with conflicting NodePort") @@ -755,7 +756,7 @@ var _ = KubeDescribe("Services", func() { service2.Spec.Ports[0].NodePort = port.NodePort result2, err := t.CreateService(service2) if err == nil { - Failf("Created service with conflicting NodePort: %v", result2) + framework.Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) @@ -779,7 +780,7 @@ var _ = KubeDescribe("Services", func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - Failf("errors in cleanup: %v", errs) + framework.Failf("errors in cleanup: %v", errs) } }() @@ -791,17 +792,17 @@ var _ = KubeDescribe("Services", func() { Expect(err).NotTo(HaveOccurred()) 
if service.Spec.Type != api.ServiceTypeNodePort { - Failf("got unexpected Spec.Type for new service: %v", service) + framework.Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { - Failf("got unexpected len(Spec.Ports) for new service: %v", service) + framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := service.Spec.Ports[0] if port.NodePort == 0 { - Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) + framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !ServiceNodePortRange.Contains(port.NodePort) { - Failf("got unexpected (out-of-range) port for new service: %v", service) + framework.Failf("got unexpected (out-of-range) port for new service: %v", service) } outOfRangeNodePort := 0 @@ -817,7 +818,7 @@ var _ = KubeDescribe("Services", func() { s.Spec.Ports[0].NodePort = outOfRangeNodePort }) if err == nil { - Failf("failed to prevent update of service with out-of-range NodePort: %v", result) + framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) } expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) @@ -832,7 +833,7 @@ var _ = KubeDescribe("Services", func() { service.Spec.Ports[0].NodePort = outOfRangeNodePort service, err = t.CreateService(service) if err == nil { - Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) + framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) } Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) }) @@ -847,7 +848,7 @@ var _ = KubeDescribe("Services", func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - Failf("errors in cleanup: %v", errs) + framework.Failf("errors in cleanup: %v", errs) } }() @@ -859,17 +860,17 @@ var _ = KubeDescribe("Services", func() { Expect(err).NotTo(HaveOccurred()) if service.Spec.Type != api.ServiceTypeNodePort { - Failf("got unexpected Spec.Type for new service: %v", service) + framework.Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { - Failf("got unexpected len(Spec.Ports) for new service: %v", service) + framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := service.Spec.Ports[0] if port.NodePort == 0 { - Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) + framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !ServiceNodePortRange.Contains(port.NodePort) { - Failf("got unexpected (out-of-range) port for new service: %v", service) + framework.Failf("got unexpected (out-of-range) port for new service: %v", service) } nodePort := port.NodePort @@ -877,19 +878,19 @@ var _ = KubeDescribe("Services", func() { err = t.DeleteService(serviceName) Expect(err).NotTo(HaveOccurred()) - hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! 
ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string - if pollErr := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { var err error - stdout, err = RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { - Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout) + framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout) return false, nil } return true, nil }); pollErr != nil { - Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout) + framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout) } By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) @@ -942,10 +943,10 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID { if err != nil { continue } - Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) + framework.Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) } - // Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) + // framework.Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) if _, ok := m[addr.TargetRef.UID]; !ok { m[addr.TargetRef.UID] = make([]int, 0) } @@ -965,78 +966,78 @@ func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints for name, portList := range expectedEndpoints { pod, err := c.Pods(ns).Get(name) if err != nil { - Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) + framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) } portsByUID[pod.ObjectMeta.UID] = portList } - // Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns) + // framework.Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns) return portsByUID } func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) { if len(endpoints) != len(expectedEndpoints) { // should not happen because we check this condition before - Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints) + framework.Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints) } for podUID := range expectedEndpoints { if _, ok := endpoints[podUID]; !ok { - Failf("endpoint %v not found", podUID) + framework.Failf("endpoint %v not found", podUID) } if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) { - Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) + framework.Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) } sort.Ints(endpoints[podUID]) sort.Ints(expectedEndpoints[podUID]) for index := range endpoints[podUID] { if endpoints[podUID][index] != expectedEndpoints[podUID][index] { - Failf("invalid list of ports for uid %v. 
Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) + framework.Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) } } } } func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) { - By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints)) + By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 - for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) { + for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) { endpoints, err := c.Endpoints(namespace).Get(serviceName) if err != nil { - Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) + framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) continue } - // Logf("Found endpoints %v", endpoints) + // framework.Logf("Found endpoints %v", endpoints) portsByPodUID := getContainerPortsByPodUID(endpoints) - // Logf("Found port by pod UID %v", portsByPodUID) + // framework.Logf("Found port by pod UID %v", portsByPodUID) expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints) if len(portsByPodUID) == len(expectedEndpoints) { validatePortsOrFail(portsByPodUID, expectedPortsByPodUID) - Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", + framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, time.Since(start)) return } if i%5 == 0 { - Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start)) + framework.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start)) } i++ } if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil { for _, pod := range pods.Items { - Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) + framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } } else { - Logf("Can't list pod debug info: %v", err) + framework.Logf("Can't list pod debug info: %v", err) } - Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout) + framework.Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, framework.ServiceStartTimeout) } // createExecPodOrFail creates a simple busybox pod in a sleep loop used as a // vessel for kubectl exec commands. 
func createExecPodOrFail(c *client.Client, ns, name string) { - Logf("Creating new exec pod") + framework.Logf("Creating new exec pod") immediate := int64(0) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -1056,7 +1057,7 @@ func createExecPodOrFail(c *client.Client, ns, name string) { } _, err := c.Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) - err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { + err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := c.Pods(pod.Namespace).Get(pod.Name) if err != nil { return false, nil @@ -1108,7 +1109,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st } func getNodePublicIps(c *client.Client) ([]string, error) { - nodes := ListSchedulableNodesOrDie(c) + nodes := framework.ListSchedulableNodesOrDie(c) ips := collectAddresses(nodes, api.NodeExternalIP) if len(ips) == 0 { @@ -1121,7 +1122,7 @@ func pickNodeIP(c *client.Client) string { publicIps, err := getNodePublicIps(c) Expect(err).NotTo(HaveOccurred()) if len(publicIps) == 0 { - Failf("got unexpected number (%d) of public IPs", len(publicIps)) + framework.Failf("got unexpected number (%d) of public IPs", len(publicIps)) } ip := publicIps[0] return ip @@ -1130,25 +1131,25 @@ func pickNodeIP(c *client.Client) string { func testReachableHTTP(ip string, port int, request string, expect string) (bool, error) { url := fmt.Sprintf("http://%s:%d%s", ip, port, request) if ip == "" { - Failf("Got empty IP for reachability check (%s)", url) + framework.Failf("Got empty IP for reachability check (%s)", url) return false, nil } if port == 0 { - Failf("Got port==0 for reachability check (%s)", url) + framework.Failf("Got port==0 for reachability check (%s)", url) return false, nil } - Logf("Testing HTTP reachability of %v", url) + framework.Logf("Testing HTTP reachability of %v", url) resp, err := httpGetNoConnectionPool(url) if err != nil { - Logf("Got error testing for reachability of %s: %v", url, err) + framework.Logf("Got error testing for reachability of %s: %v", url, err) return false, nil } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - Logf("Got error reading response from %s: %v", url, err) + framework.Logf("Got error reading response from %s: %v", url, err) return false, nil } if resp.StatusCode != 200 { @@ -1157,26 +1158,26 @@ func testReachableHTTP(ip string, port int, request string, expect string) (bool if !strings.Contains(string(body), expect) { return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) } - Logf("Successfully reached %v", url) + framework.Logf("Successfully reached %v", url) return true, nil } func testNotReachableHTTP(ip string, port int) (bool, error) { url := fmt.Sprintf("http://%s:%d", ip, port) if ip == "" { - Failf("Got empty IP for non-reachability check (%s)", url) + framework.Failf("Got empty IP for non-reachability check (%s)", url) return false, nil } if port == 0 { - Failf("Got port==0 for non-reachability check (%s)", url) + framework.Failf("Got port==0 for non-reachability check (%s)", url) return false, nil } - Logf("Testing HTTP non-reachability of %v", url) + framework.Logf("Testing HTTP non-reachability of %v", url) resp, err := httpGetNoConnectionPool(url) if err != nil { - Logf("Confirmed that %s is not reachable", url) + framework.Logf("Confirmed that %s is not reachable", url) return true, nil } resp.Body.Close() @@ -1186,15 +1187,15 @@ func testNotReachableHTTP(ip string, 
port int) (bool, error) { func testReachableUDP(ip string, port int, request string, expect string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { - Failf("Got empty IP for reachability check (%s)", uri) + framework.Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 0 { - Failf("Got port==0 for reachability check (%s)", uri) + framework.Failf("Got port==0 for reachability check (%s)", uri) return false, nil } - Logf("Testing UDP reachability of %v", uri) + framework.Logf("Testing UDP reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { @@ -1222,32 +1223,32 @@ func testReachableUDP(ip string, port int, request string, expect string) (bool, return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) } - Logf("Successfully reached %v", uri) + framework.Logf("Successfully reached %v", uri) return true, nil } func testNotReachableUDP(ip string, port int, request string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { - Failf("Got empty IP for reachability check (%s)", uri) + framework.Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 0 { - Failf("Got port==0 for reachability check (%s)", uri) + framework.Failf("Got port==0 for reachability check (%s)", uri) return false, nil } - Logf("Testing UDP non-reachability of %v", uri) + framework.Logf("Testing UDP non-reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { - Logf("Confirmed that %s is not reachable", uri) + framework.Logf("Confirmed that %s is not reachable", uri) return true, nil } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { - Logf("Confirmed that %s is not reachable", uri) + framework.Logf("Confirmed that %s is not reachable", uri) return true, nil } @@ -1260,7 +1261,7 @@ func testNotReachableUDP(ip string, port int, request string) (bool, error) { _, err = con.Read(buf) if err != nil { - Logf("Confirmed that %s is not reachable", uri) + framework.Logf("Confirmed that %s is not reachable", uri) return true, nil } @@ -1293,18 +1294,18 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas var createdPods []*api.Pod maxContainerFailures := 0 - config := RCConfig{ + config := framework.RCConfig{ Client: c, Image: "gcr.io/google_containers/serve_hostname:v1.4", Name: name, Namespace: ns, PollInterval: 3 * time.Second, - Timeout: podReadyBeforeTimeout, + Timeout: framework.PodReadyBeforeTimeout, Replicas: replicas, CreatedPods: &createdPods, MaxContainerFailures: &maxContainerFailures, } - err = RunRC(config) + err = framework.RunRC(config) if err != nil { return podNames, "", err } @@ -1330,7 +1331,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas } func stopServeHostnameService(c *client.Client, ns, name string) error { - if err := DeleteRC(c, ns, name); err != nil { + if err := framework.DeleteRC(c, ns, name); err != nil { return err } if err := c.Services(ns).Delete(name); err != nil { @@ -1360,22 +1361,22 @@ func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPod // verify service from node func() string { cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") - Logf("Executing cmd %q on host %v", cmd, host) - result, err := SSH(cmd, host, testContext.Provider) + framework.Logf("Executing cmd %q on host %v", cmd, host) + result, err := framework.SSH(cmd, host, 
framework.TestContext.Provider) if err != nil || result.Code != 0 { - LogSSHResult(result) - Logf("error while SSH-ing to node: %v", err) + framework.LogSSHResult(result) + framework.Logf("error while SSH-ing to node: %v", err) } return result.Stdout }, // verify service from pod func() string { cmd := buildCommand("wget -q -T 1 -O -") - Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) + framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. - output, err := RunHostCmd(ns, execPodName, cmd) + output, err := framework.RunHostCmd(ns, execPodName, cmd) if err != nil { - Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) + framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) } return output }, @@ -1401,12 +1402,12 @@ func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPod // and we need a better way to track how often it occurs. if gotEndpoints.IsSuperset(expectedEndpoints) { if !gotEndpoints.Equal(expectedEndpoints) { - Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) + framework.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) } passed = true break } - Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) + framework.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) } if !passed { // Sort the lists so they're easier to visually diff. 
@@ -1425,15 +1426,15 @@ func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP str "curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort) for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := SSH(command, host, testContext.Provider) + result, err := framework.SSH(command, host, framework.TestContext.Provider) if err != nil { - LogSSHResult(result) - Logf("error while SSH-ing to node: %v", err) + framework.LogSSHResult(result) + framework.Logf("error while SSH-ing to node: %v", err) } if result.Code != 99 { return nil } - Logf("service still alive - still waiting") + framework.Logf("service still alive - still waiting") } return fmt.Errorf("waiting for service to be down timed out") } @@ -1505,7 +1506,7 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc } result, err := j.Client.Services(namespace).Create(svc) if err != nil { - Failf("Failed to create TCP Service %q: %v", svc.Name, err) + framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) } return result } @@ -1520,14 +1521,14 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc } result, err := j.Client.Services(namespace).Create(svc) if err != nil { - Failf("Failed to create UDP Service %q: %v", svc.Name, err) + framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err) } return result } func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.ServiceType) { if svc.Spec.Type != svcType { - Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) + framework.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) } expectNodePorts := false if svcType != api.ServiceTypeClusterIP { @@ -1536,11 +1537,11 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic for i, port := range svc.Spec.Ports { hasNodePort := (port.NodePort != 0) if hasNodePort != expectNodePorts { - Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) + framework.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) } if hasNodePort { if !ServiceNodePortRange.Contains(port.NodePort) { - Failf("out-of-range nodePort (%d) for service", port.NodePort) + framework.Failf("out-of-range nodePort (%d) for service", port.NodePort) } } } @@ -1550,12 +1551,12 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic } hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0 if hasIngress != expectIngress { - Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress)) + framework.Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress)) } if hasIngress { for i, ing := range svc.Status.LoadBalancer.Ingress { if ing.IP == "" && ing.Hostname == "" { - Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing) + framework.Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing) } } } @@ -1589,7 +1590,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api. 
func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*api.Service)) *api.Service { svc, err := j.UpdateService(namespace, name, update) if err != nil { - Failf(err.Error()) + framework.Failf(err.Error()) } return svc } @@ -1605,21 +1606,21 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini s.Spec.Ports[0].NodePort = newPort }) if err != nil && strings.Contains(err.Error(), "provided port is already allocated") { - Logf("tried nodePort %d, but it is in use, will try another", newPort) + framework.Logf("tried nodePort %d, but it is in use, will try another", newPort) continue } // Otherwise err was nil or err was a real error break } if err != nil { - Failf("Could not change the nodePort: %v", err) + framework.Failf("Could not change the nodePort: %v", err) } return service } func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api.Service { var service *api.Service - Logf("Waiting up to %v for service %q to have a LoadBalancer", loadBalancerCreateTimeout, name) + framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", loadBalancerCreateTimeout, name) pollFunc := func() (bool, error) { svc, err := j.Client.Services(namespace).Get(name) if err != nil { @@ -1631,8 +1632,8 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api. } return false, nil } - if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil { - Failf("Timeout waiting for service %q to have a load balancer", name) + if err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeout, pollFunc); err != nil { + framework.Failf("Timeout waiting for service %q to have a load balancer", name) } return service } @@ -1640,13 +1641,13 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api. 
func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int) *api.Service { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable defer func() { - if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { - Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) + if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { + framework.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) } }() var service *api.Service - Logf("Waiting up to %v for service %q to have no LoadBalancer", loadBalancerCreateTimeout, name) + framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", loadBalancerCreateTimeout, name) pollFunc := func() (bool, error) { svc, err := j.Client.Services(namespace).Get(name) if err != nil { @@ -1658,33 +1659,33 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string } return false, nil } - if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil { - Failf("Timeout waiting for service %q to have no load balancer", name) + if err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeout, pollFunc); err != nil { + framework.Failf("Timeout waiting for service %q to have no load balancer", name) } return service } func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil { - Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) + if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil { + framework.Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil { - Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) + if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil { + framework.Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil { - Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) + if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil { + framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil { - Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) + if err := wait.PollImmediate(framework.Poll, timeout, 
func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil { + framework.Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } @@ -1748,14 +1749,14 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.Replicat } result, err := j.Client.ReplicationControllers(namespace).Create(rc) if err != nil { - Failf("Failed to created RC %q: %v", rc.Name, err) + framework.Failf("Failed to created RC %q: %v", rc.Name, err) } pods, err := j.waitForPodsCreated(namespace, rc.Spec.Replicas) if err != nil { - Failf("Failed to create pods: %v", err) + framework.Failf("Failed to create pods: %v", err) } if err := j.waitForPodsReady(namespace, pods); err != nil { - Failf("Failed waiting for pods to be running: %v", err) + framework.Failf("Failed waiting for pods to be running: %v", err) } return result } @@ -1764,7 +1765,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(j.Labels)) - Logf("Waiting up to %v for %d pods to be created", timeout, replicas) + framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := api.ListOptions{LabelSelector: label} pods, err := j.Client.Pods(namespace).List(options) @@ -1780,17 +1781,17 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s found = append(found, pod.Name) } if len(found) == replicas { - Logf("Found all %d pods", replicas) + framework.Logf("Found all %d pods", replicas) return found, nil } - Logf("Found %d/%d pods - will retry", len(found), replicas) + framework.Logf("Found %d/%d pods - will retry", len(found), replicas) } return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) } func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute - if !checkPodsRunningReady(j.Client, namespace, pods, timeout) { + if !framework.CheckPodsRunningReady(j.Client, namespace, pods, timeout) { return fmt.Errorf("Timeout waiting for %d pods to be ready", len(pods)) } return nil @@ -1854,10 +1855,10 @@ func (t *ServiceTestFixture) CreateWebserverRC(replicas int) *api.ReplicationCon rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels) rcAct, err := t.createRC(rcSpec) if err != nil { - Failf("Failed to create rc %s: %v", rcSpec.Name, err) + framework.Failf("Failed to create rc %s: %v", rcSpec.Name, err) } - if err := verifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil { - Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err) + if err := framework.VerifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil { + framework.Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err) } return rcAct } diff --git a/test/e2e/service_accounts.go b/test/e2e/service_accounts.go index bf0ca0f8664..c60d867f01b 100644 --- a/test/e2e/service_accounts.go +++ b/test/e2e/service_accounts.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -33,29 +34,29 @@ import ( var serviceAccountTokenNamespaceVersion = version.MustParse("v1.2.0") -var _ = KubeDescribe("ServiceAccounts", func() { - f := NewDefaultFramework("svcaccounts") +var _ = framework.KubeDescribe("ServiceAccounts", func() { + f := framework.NewDefaultFramework("svcaccounts") It("should ensure a single API token exists", func() { // wait for the service account to reference a single secret var secrets []api.ObjectReference - expectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { By("waiting for a single token reference") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") if apierrors.IsNotFound(err) { - Logf("default service account was not found") + framework.Logf("default service account was not found") return false, nil } if err != nil { - Logf("error getting default service account: %v", err) + framework.Logf("error getting default service account: %v", err) return false, err } switch len(sa.Secrets) { case 0: - Logf("default service account has no secret references") + framework.Logf("default service account has no secret references") return false, nil case 1: - Logf("default service account has a single secret reference") + framework.Logf("default service account has a single secret reference") secrets = sa.Secrets return true, nil default: @@ -68,32 +69,32 @@ var _ = KubeDescribe("ServiceAccounts", func() { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") - expectNoError(err) + framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } // delete the referenced secret By("deleting the service account token") - expectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name)) + framework.ExpectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name)) // wait for the referenced secret to be removed, and another one autocreated - expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token reference") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") if err != nil { - Logf("error getting default service account: %v", err) + framework.Logf("error getting default service account: %v", err) return false, err } switch len(sa.Secrets) { case 0: - Logf("default service account has no secret references") + framework.Logf("default service account has no secret references") return false, nil case 1: if sa.Secrets[0] == secrets[0] { - Logf("default service account still has the deleted secret reference") + framework.Logf("default service account still has the deleted secret reference") return false, nil } - Logf("default service account has a new single secret reference") + framework.Logf("default service account has a new single secret reference") secrets = sa.Secrets return true, nil default: @@ -106,7 +107,7 @@ var _ = KubeDescribe("ServiceAccounts", func() { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") - expectNoError(err) + framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } @@ -114,26 +115,26 @@ var _ = KubeDescribe("ServiceAccounts", func() { 
By("deleting the reference to the service account token") { sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") - expectNoError(err) + framework.ExpectNoError(err) sa.Secrets = nil _, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa) - expectNoError(updateErr) + framework.ExpectNoError(updateErr) } // wait for another one to be autocreated - expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token to be created and added") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") if err != nil { - Logf("error getting default service account: %v", err) + framework.Logf("error getting default service account: %v", err) return false, err } switch len(sa.Secrets) { case 0: - Logf("default service account has no secret references") + framework.Logf("default service account has no secret references") return false, nil case 1: - Logf("default service account has a new single secret reference") + framework.Logf("default service account has a new single secret reference") secrets = sa.Secrets return true, nil default: @@ -146,7 +147,7 @@ var _ = KubeDescribe("ServiceAccounts", func() { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") - expectNoError(err) + framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } }) @@ -156,25 +157,25 @@ var _ = KubeDescribe("ServiceAccounts", func() { var rootCAContent string // Standard get, update retry loop - expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { + framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("getting the auto-created API token") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") if apierrors.IsNotFound(err) { - Logf("default service account was not found") + framework.Logf("default service account was not found") return false, nil } if err != nil { - Logf("error getting default service account: %v", err) + framework.Logf("error getting default service account: %v", err) return false, err } if len(sa.Secrets) == 0 { - Logf("default service account has no secret references") + framework.Logf("default service account has no secret references") return false, nil } for _, secretRef := range sa.Secrets { secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name) if err != nil { - Logf("Error getting secret %s: %v", secretRef.Name, err) + framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue } if secret.Type == api.SecretTypeServiceAccountToken { @@ -184,7 +185,7 @@ var _ = KubeDescribe("ServiceAccounts", func() { } } - Logf("default service account has no secret references to valid service account tokens") + framework.Logf("default service account has no secret references to valid service account tokens") return false, nil })) @@ -213,7 +214,7 @@ var _ = KubeDescribe("ServiceAccounts", func() { }, } - supportsTokenNamespace, _ := serverVersionGTE(serviceAccountTokenNamespaceVersion, f.Client) + supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.Client) if supportsTokenNamespace { pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ Name: "namespace-test", diff --git 
a/test/e2e/service_latency.go b/test/e2e/service_latency.go index 83c229974c4..fd24e44942f 100644 --- a/test/e2e/service_latency.go +++ b/test/e2e/service_latency.go @@ -24,11 +24,12 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/controller/framework" + controllerframework "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" ) @@ -39,8 +40,8 @@ func (d durations) Len() int { return len(d) } func (d durations) Less(i, j int) bool { return d[i] < d[j] } func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -var _ = KubeDescribe("Service endpoints latency", func() { - f := NewDefaultFramework("svc-latency") +var _ = framework.KubeDescribe("Service endpoints latency", func() { + f := framework.NewDefaultFramework("svc-latency") It("should not be very high [Conformance]", func() { const ( @@ -91,14 +92,14 @@ var _ = KubeDescribe("Service endpoints latency", func() { } return dSorted[est] } - Logf("Latencies: %v", dSorted) + framework.Logf("Latencies: %v", dSorted) p50 := percentile(50) p90 := percentile(90) p99 := percentile(99) - Logf("50 %%ile: %v", p50) - Logf("90 %%ile: %v", p90) - Logf("99 %%ile: %v", p99) - Logf("Total sample count: %v", len(dSorted)) + framework.Logf("50 %%ile: %v", p50) + framework.Logf("90 %%ile: %v", p90) + framework.Logf("99 %%ile: %v", p99) + framework.Logf("Total sample count: %v", len(dSorted)) if p50 > limitMedian { failing.Insert("Median latency should be less than " + limitMedian.String()) @@ -114,8 +115,8 @@ var _ = KubeDescribe("Service endpoints latency", func() { }) }) -func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Duration, err error) { - cfg := RCConfig{ +func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) { + cfg := framework.RCConfig{ Client: f.Client, Image: "gcr.io/google_containers/pause:2.0", Name: "svc-latency-rc", @@ -123,10 +124,10 @@ func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Dur Replicas: 1, PollInterval: time.Second, } - if err := RunRC(cfg); err != nil { + if err := framework.RunRC(cfg); err != nil { return nil, err } - defer DeleteRC(f.Client, f.Namespace.Name, cfg.Name) + defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name) // Run a single watcher, to reduce the number of API calls we have to // make; this is to minimize the timing error. It's how kube-proxy @@ -164,7 +165,7 @@ func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Dur for i := 0; i < total; i++ { select { case e := <-errs: - Logf("Got error: %v", e) + framework.Logf("Got error: %v", e) errCount += 1 case d := <-durations: output = append(output, d) @@ -273,8 +274,8 @@ func (eq *endpointQueries) added(e *api.Endpoints) { } // blocks until it has finished syncing. 
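Because the shared e2e helpers now live in a package literally named framework, service_latency.go above has to alias the controller informer package it was already importing. A self-contained sketch of that aliasing choice, assuming only the two import paths shown in the hunk:

package e2e

import (
	// Aliased so it does not collide with the e2e framework package below.
	controllerframework "k8s.io/kubernetes/pkg/controller/framework"

	"k8s.io/kubernetes/test/e2e/framework"
)

// Both packages remain usable without ambiguity.
var (
	_ = controllerframework.ResourceEventHandlerFuncs{}
	_ = framework.Logf
)

The informer package gets the alias, presumably because the e2e helpers are referenced far more often in these files and so keep the short name.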
-func startEndpointWatcher(f *Framework, q *endpointQueries) { - _, controller := framework.NewInformer( +func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { + _, controller := controllerframework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return f.Client.Endpoints(f.Namespace.Name).List(options) @@ -285,7 +286,7 @@ func startEndpointWatcher(f *Framework, q *endpointQueries) { }, &api.Endpoints{}, 0, - framework.ResourceEventHandlerFuncs{ + controllerframework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { if e, ok := obj.(*api.Endpoints); ok { if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 { @@ -311,7 +312,7 @@ func startEndpointWatcher(f *Framework, q *endpointQueries) { } } -func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.Duration, error) { +func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) { // Make a service that points to that pod. svc := &api.Service{ ObjectMeta: api.ObjectMeta{ @@ -329,7 +330,7 @@ func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.D if err != nil { return 0, err } - Logf("Created: %v", gotSvc.Name) + framework.Logf("Created: %v", gotSvc.Name) defer f.Client.Services(gotSvc.Namespace).Delete(gotSvc.Name) if e := q.request(gotSvc.Name); e == nil { @@ -337,6 +338,6 @@ func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.D } stopTime := time.Now() d := stopTime.Sub(startTime) - Logf("Got endpoints: %v [%v]", gotSvc.Name, d) + framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d) return d, nil } diff --git a/test/e2e/serviceloadbalancers.go b/test/e2e/serviceloadbalancers.go index b248d13394d..a8c50311d6a 100644 --- a/test/e2e/serviceloadbalancers.go +++ b/test/e2e/serviceloadbalancers.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/wait" utilyaml "k8s.io/kubernetes/pkg/util/yaml" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -95,14 +96,14 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { for i, c := range rc.Spec.Template.Spec.Containers { rc.Spec.Template.Spec.Containers[i].Args = append( c.Args, fmt.Sprintf("--namespace=%v", namespace)) - Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args) + framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args) } rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc) if err != nil { return } - if err = waitForRCPodsRunning(h.client, namespace, rc.Name); err != nil { + if err = framework.WaitForRCPodsRunning(h.client, namespace, rc.Name); err != nil { return } h.rcName = rc.Name @@ -119,10 +120,10 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { // Find the external addresses of the nodes the pods are running on. 
for _, p := range pods.Items { - wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) { - address, err := getHostExternalAddress(h.client, &p) + wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) { + address, err := framework.GetHostExternalAddress(h.client, &p) if err != nil { - Logf("%v", err) + framework.Logf("%v", err) return false, nil } h.address = append(h.address, address) @@ -169,7 +170,7 @@ func (s *ingManager) start(namespace string) (err error) { if err != nil { return } - if err = waitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil { + if err = framework.WaitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil { return } } @@ -194,28 +195,28 @@ func (s *ingManager) start(namespace string) (err error) { func (s *ingManager) test(path string) error { url := fmt.Sprintf("%v/hostName", path) httpClient := &http.Client{} - return wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) { + return wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) { body, err := simpleGET(httpClient, url, "") if err != nil { - Logf("%v\n%v\n%v", url, body, err) + framework.Logf("%v\n%v\n%v", url, body, err) return false, nil } return true, nil }) } -var _ = KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() { +var _ = framework.KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() { // These variables are initialized after framework's beforeEach. var ns string var repoRoot string var client *client.Client - framework := NewDefaultFramework("servicelb") + f := framework.NewDefaultFramework("servicelb") BeforeEach(func() { - client = framework.Client - ns = framework.Namespace.Name - repoRoot = testContext.RepoRoot + client = f.Client + ns = f.Namespace.Name + repoRoot = framework.TestContext.RepoRoot }) It("should support simple GET on Ingress ips", func() { @@ -229,7 +230,7 @@ var _ = KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() for _, sName := range s.svcNames { path := t.lookup(sName) - Logf("Testing path %v", path) + framework.Logf("Testing path %v", path) Expect(s.test(path)).NotTo(HaveOccurred()) } } @@ -266,7 +267,7 @@ func simpleGET(c *http.Client, url, host string) (string, error) { // rcFromManifest reads a .json/yaml file and returns the rc in it. func rcFromManifest(fileName string) *api.ReplicationController { var controller api.ReplicationController - Logf("Parsing rc from %v", fileName) + framework.Logf("Parsing rc from %v", fileName) data, err := ioutil.ReadFile(fileName) Expect(err).NotTo(HaveOccurred()) @@ -280,7 +281,7 @@ func rcFromManifest(fileName string) *api.ReplicationController { // svcFromManifest reads a .json/yaml file and returns the rc in it. func svcFromManifest(fileName string) *api.Service { var svc api.Service - Logf("Parsing service from %v", fileName) + framework.Logf("Parsing service from %v", fileName) data, err := ioutil.ReadFile(fileName) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/ssh.go b/test/e2e/ssh.go index d7e49afb820..3da59417a2b 100644 --- a/test/e2e/ssh.go +++ b/test/e2e/ssh.go @@ -20,24 +20,26 @@ import ( "fmt" "strings" + "k8s.io/kubernetes/test/e2e/framework" + . 
"github.com/onsi/ginkgo" ) -var _ = KubeDescribe("SSH", func() { +var _ = framework.KubeDescribe("SSH", func() { - f := NewDefaultFramework("ssh") + f := framework.NewDefaultFramework("ssh") BeforeEach(func() { - // When adding more providers here, also implement their functionality in util.go's getSigner(...). - SkipUnlessProviderIs(providersWithSSH...) + // When adding more providers here, also implement their functionality in util.go's framework.GetSigner(...). + framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) }) It("should SSH to all nodes and run commands", func() { // Get all nodes' external IPs. By("Getting all nodes' SSH-able IP addresses") - hosts, err := NodeSSHHosts(f.Client) + hosts, err := framework.NodeSSHHosts(f.Client) if err != nil { - Failf("Error getting node hostnames: %v", err) + framework.Failf("Error getting node hostnames: %v", err) } testCases := []struct { @@ -59,34 +61,34 @@ var _ = KubeDescribe("SSH", func() { for _, testCase := range testCases { By(fmt.Sprintf("SSH'ing to all nodes and running %s", testCase.cmd)) for _, host := range hosts { - result, err := SSH(testCase.cmd, host, testContext.Provider) + result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) if err != testCase.expectedError { - Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) + framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) } if testCase.checkStdout && stdout != testCase.expectedStdout { - Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) + framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) } if stderr != testCase.expectedStderr { - Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) + framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) } if result.Code != testCase.expectedCode { - Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) + framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) } // Show stdout, stderr for logging purposes. if len(stdout) > 0 { - Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout)) + framework.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout)) } if len(stderr) > 0 { - Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr)) + framework.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr)) } } } // Quickly test that SSH itself errors correctly. 
By("SSH'ing to a nonexistent host") - if _, err = SSH(`echo "hello"`, "i.do.not.exist", testContext.Provider); err == nil { - Failf("Expected error trying to SSH to nonexistent host.") + if _, err = framework.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { + framework.Failf("Expected error trying to SSH to nonexistent host.") } }) }) diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go index c31e51b5ced..16a8adde268 100644 --- a/test/e2e/ubernetes_lite.go +++ b/test/e2e/ubernetes_lite.go @@ -29,34 +29,35 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/framework" ) -var _ = KubeDescribe("Ubernetes Lite", func() { - framework := NewDefaultFramework("ubernetes-lite") +var _ = framework.KubeDescribe("Ubernetes Lite", func() { + f := framework.NewDefaultFramework("ubernetes-lite") var zoneCount int var err error image := "gcr.io/google_containers/serve_hostname:v1.4" BeforeEach(func() { if zoneCount <= 0 { - zoneCount, err = getZoneCount(framework.Client) + zoneCount, err = getZoneCount(f.Client) Expect(err).NotTo(HaveOccurred()) } By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) - SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test") - SkipUnlessProviderIs("gce", "gke", "aws") + framework.SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test") + framework.SkipUnlessProviderIs("gce", "gke", "aws") // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread }) It("should spread the pods of a service across zones", func() { - SpreadServiceOrFail(framework, (2*zoneCount)+1, image) + SpreadServiceOrFail(f, (2*zoneCount)+1, image) }) It("should spread the pods of a replication controller across zones", func() { - SpreadRCOrFail(framework, (2*zoneCount)+1, image) + SpreadRCOrFail(f, (2*zoneCount)+1, image) }) }) // Check that the pods comprising a service get spread evenly across available zones -func SpreadServiceOrFail(f *Framework, replicaCount int, image string) { +func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) { // First create the service serviceName := "test-service" serviceSpec := &api.Service{ @@ -92,11 +93,11 @@ func SpreadServiceOrFail(f *Framework, replicaCount int, image string) { }, }, } - startPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false) + framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) - pods, err := waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) + pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) Expect(err).NotTo(HaveOccurred()) // Now make sure they're spread across zones @@ -180,7 +181,7 @@ func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string) } // Check that the pods comprising a replication controller get spread evenly across available zones -func SpreadRCOrFail(f *Framework, replicaCount int, image string) { +func SpreadRCOrFail(f *framework.Framework, replicaCount int, image string) { name := "ubelite-spread-rc-" + string(util.NewUUID()) By(fmt.Sprintf("Creating replication controller %s", name)) controller, err := 
f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ @@ -213,18 +214,18 @@ func SpreadRCOrFail(f *Framework, replicaCount int, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { - Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) + if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { + framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicaCount) + pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount) Expect(err).NotTo(HaveOccurred()) // Wait for all of them to be scheduled By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector)) - pods, err = waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) + pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) Expect(err).NotTo(HaveOccurred()) // Now make sure they're spread across zones diff --git a/test/e2e/volume_provisioning.go b/test/e2e/volume_provisioning.go index f6258ed906d..ef5f51b55aa 100644 --- a/test/e2e/volume_provisioning.go +++ b/test/e2e/volume_provisioning.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -36,21 +37,21 @@ const ( expectedSize = "2Gi" ) -var _ = KubeDescribe("Dynamic provisioning", func() { - framework := NewDefaultFramework("volume-provisioning") +var _ = framework.KubeDescribe("Dynamic provisioning", func() { + f := framework.NewDefaultFramework("volume-provisioning") // filled in BeforeEach var c *client.Client var ns string BeforeEach(func() { - c = framework.Client - ns = framework.Namespace.Name + c = f.Client + ns = f.Namespace.Name }) - KubeDescribe("DynamicProvisioner", func() { + framework.KubeDescribe("DynamicProvisioner", func() { It("should create and delete persistent volumes", func() { - SkipUnlessProviderIs("openstack", "gce", "aws", "gke") + framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke") By("creating a claim with a dynamic provisioning annotation") claim := createClaim(ns) defer func() { @@ -59,7 +60,7 @@ var _ = KubeDescribe("Dynamic provisioning", func() { claim, err := c.PersistentVolumeClaims(ns).Create(claim) Expect(err).NotTo(HaveOccurred()) - err = waitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, poll, claimProvisionTimeout) + err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) By("checking the claim") @@ -115,10 +116,10 @@ var _ = KubeDescribe("Dynamic provisioning", func() { time.Sleep(time.Minute) By("deleting the claim") - expectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name)) + framework.ExpectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name)) // Wait for the PV to get deleted too. 
- expectNoError(waitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 10*time.Minute)) + framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 10*time.Minute)) }) }) }) @@ -186,8 +187,8 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) { } pod, err := c.Pods(ns).Create(pod) defer func() { - expectNoError(c.Pods(ns).Delete(pod.Name, nil)) + framework.ExpectNoError(c.Pods(ns).Delete(pod.Name, nil)) }() - expectNoError(err, "Failed to create pod: %v", err) - expectNoError(waitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace)) + framework.ExpectNoError(err, "Failed to create pod: %v", err) + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace)) } diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go index c47bdc3bddf..fab25861280 100644 --- a/test/e2e/volumes.go +++ b/test/e2e/volumes.go @@ -49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/test/e2e/framework" "github.com/golang/glog" . "github.com/onsi/ginkgo" @@ -140,13 +141,13 @@ func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod }, } _, err := podClient.Create(serverPod) - expectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err) + framework.ExpectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err) - expectNoError(waitForPodRunningInNamespace(client, serverPod.Name, config.namespace)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod.Name, config.namespace)) By("locating the server pod") pod, err := podClient.Get(serverPod.Name) - expectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err) + framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err) By("sleeping a bit to give the server time to start") time.Sleep(20 * time.Second) @@ -164,16 +165,16 @@ func volumeTestCleanup(client *client.Client, config VolumeTestConfig) { err := podClient.Delete(config.prefix+"-client", nil) if err != nil { // Log the error before failing test: if the test has already failed, - // expectNoError() won't print anything to logs! + // framework.ExpectNoError() won't print anything to logs! 
glog.Warningf("Failed to delete client pod: %v", err) - expectNoError(err, "Failed to delete client pod: %v", err) + framework.ExpectNoError(err, "Failed to delete client pod: %v", err) } if config.serverImage != "" { err = podClient.Delete(config.prefix+"-server", nil) if err != nil { glog.Warningf("Failed to delete server pod: %v", err) - expectNoError(err, "Failed to delete server pod: %v", err) + framework.ExpectNoError(err, "Failed to delete server pod: %v", err) } } } @@ -234,18 +235,18 @@ func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api clientPod.Spec.SecurityContext.FSGroup = fsGroup } if _, err := podsNamespacer.Create(clientPod); err != nil { - Failf("Failed to create %s pod: %v", clientPod.Name, err) + framework.Failf("Failed to create %s pod: %v", clientPod.Name, err) } - expectNoError(waitForPodRunningInNamespace(client, clientPod.Name, config.namespace)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod.Name, config.namespace)) By("Checking that text file contents are perfect.") - _, err := lookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute) + _, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute) Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.") if fsGroup != nil { By("Checking fsGroup is correct.") - _, err = lookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute) + _, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute) Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup)) } } @@ -303,8 +304,8 @@ func injectHtml(client *client.Client, config VolumeTestConfig, volume api.Volum }() injectPod, err := podClient.Create(injectPod) - expectNoError(err, "Failed to create injector pod: %v", err) - err = waitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Spec.Containers[0].Name, injectPod.Namespace) + framework.ExpectNoError(err, "Failed to create injector pod: %v", err) + err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Spec.Containers[0].Name, injectPod.Namespace) Expect(err).NotTo(HaveOccurred()) } @@ -315,24 +316,24 @@ func deleteCinderVolume(name string) error { var err error timeout := time.Second * 120 - Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) + framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { output, err = exec.Command("cinder", "delete", name).CombinedOutput() if err == nil { - Logf("Cinder volume %s deleted", name) + framework.Logf("Cinder volume %s deleted", name) return nil } else { - Logf("Failed to delete volume %s: %v", name, err) + framework.Logf("Failed to delete volume %s: %v", name, err) } } - Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) + framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) return err } // These tests need privileged containers, which are disabled by default. Run // the test with "go run hack/e2e.go ... 
--ginkgo.focus=[Feature:Volumes]" -var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { - framework := NewDefaultFramework("volume") +var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { + f := framework.NewDefaultFramework("volume") // If 'false', the test won't clear its volumes upon completion. Useful for debugging, // note that namespace deletion is handled by delete-namespace flag @@ -342,15 +343,15 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { var namespace *api.Namespace BeforeEach(func() { - c = framework.Client - namespace = framework.Namespace + c = f.Client + namespace = f.Namespace }) //////////////////////////////////////////////////////////////////////// // NFS //////////////////////////////////////////////////////////////////////// - KubeDescribe("NFS", func() { + framework.KubeDescribe("NFS", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -366,7 +367,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("NFS server IP address: %v", serverIP) + framework.Logf("NFS server IP address: %v", serverIP) volume := api.VolumeSource{ NFS: &api.NFSVolumeSource{ @@ -384,7 +385,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { // Gluster //////////////////////////////////////////////////////////////////////// - KubeDescribe("GlusterFS", func() { + framework.KubeDescribe("GlusterFS", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -400,7 +401,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("Gluster server IP address: %v", serverIP) + framework.Logf("Gluster server IP address: %v", serverIP) // create Endpoints for the server endpoints := api.Endpoints{ @@ -438,7 +439,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() if _, err := endClient.Create(&endpoints); err != nil { - Failf("Failed to create endpoints for Gluster server: %v", err) + framework.Failf("Failed to create endpoints for Gluster server: %v", err) } volume := api.VolumeSource{ @@ -463,7 +464,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { // are installed on all nodes! // Run the test with "go run hack/e2e.go ... 
--ginkgo.focus=iSCSI" - KubeDescribe("iSCSI", func() { + framework.KubeDescribe("iSCSI", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -483,7 +484,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("iSCSI server IP address: %v", serverIP) + framework.Logf("iSCSI server IP address: %v", serverIP) volume := api.VolumeSource{ ISCSI: &api.ISCSIVolumeSource{ @@ -505,7 +506,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { // Ceph RBD //////////////////////////////////////////////////////////////////////// - KubeDescribe("Ceph RBD", func() { + framework.KubeDescribe("Ceph RBD", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -526,7 +527,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("Ceph server IP address: %v", serverIP) + framework.Logf("Ceph server IP address: %v", serverIP) // create secrets for the server secret := api.Secret{ @@ -552,7 +553,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() if _, err := secClient.Create(&secret); err != nil { - Failf("Failed to create secrets for Ceph RBD: %v", err) + framework.Failf("Failed to create secrets for Ceph RBD: %v", err) } volume := api.VolumeSource{ @@ -578,7 +579,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { // Ceph //////////////////////////////////////////////////////////////////////// - KubeDescribe("CephFS", func() { + framework.KubeDescribe("CephFS", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -594,7 +595,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { }() pod := startVolumeServer(c, config) serverIP := pod.Status.PodIP - Logf("Ceph server IP address: %v", serverIP) + framework.Logf("Ceph server IP address: %v", serverIP) By("sleeping a bit to give ceph server time to initialize") time.Sleep(20 * time.Second) @@ -617,14 +618,14 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { defer func() { if clean { if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil { - Failf("unable to delete secret %v: %v", secret.Name, err) + framework.Failf("unable to delete secret %v: %v", secret.Name, err) } } }() var err error if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil { - Failf("unable to create test secret %s: %v", secret.Name, err) + framework.Failf("unable to create test secret %s: %v", secret.Name, err) } volume := api.VolumeSource{ @@ -649,7 +650,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { // and that the usual OpenStack authentication env. variables are set // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). 
- KubeDescribe("Cinder", func() { + framework.KubeDescribe("Cinder", func() { It("should be mountable", func() { config := VolumeTestConfig{ namespace: namespace.Name, @@ -661,7 +662,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { By("creating a test Cinder volume") output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() outputString := string(output[:]) - Logf("cinder output:\n%s", outputString) + framework.Logf("cinder output:\n%s", outputString) Expect(err).NotTo(HaveOccurred()) defer func() { @@ -687,12 +688,12 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { volumeID = fields[3] break } - Logf("Volume ID: %s", volumeID) + framework.Logf("Volume ID: %s", volumeID) Expect(volumeID).NotTo(Equal("")) defer func() { if clean { - Logf("Running volumeTestCleanup") + framework.Logf("Running volumeTestCleanup") volumeTestCleanup(c, config) } }() diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 0e014608d96..805bec4cb04 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/test/e2e" + e2e "k8s.io/kubernetes/test/e2e/framework" ) var (