Update test/e2e for test/e2e/framework refactoring

Tim St. Clair 2016-04-07 10:21:31 -07:00
parent a55b4f2e77
commit b0d3f32e88
88 changed files with 2969 additions and 2887 deletions
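The change is mechanical across the test suite: helpers that used to be unexported, package-level functions in test/e2e (Logf, Failf, expectNoError, NewDefaultFramework, and friends) now live in test/e2e/framework and are called through that package. A minimal post-refactor test sketch, assuming only the framework identifiers that appear in the diffs below (the test itself is hypothetical):

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

// Hypothetical example; only the framework calls are taken from this commit.
var _ = framework.KubeDescribe("Example", func() {
	// NewDefaultFramework creates a uniquely named namespace for the test
	// and tears it down afterwards.
	f := framework.NewDefaultFramework("example")

	It("lists pods in the test namespace", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		framework.ExpectNoError(err)
		framework.Logf("found %d pods", len(pods.Items))
	})
})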

View File

@@ -65,7 +65,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
-"k8s.io/kubernetes/test/e2e"
+e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"

View File

@@ -180,7 +180,7 @@ right thing.
Here are a few pointers:
-+ [E2e Framework](../../test/e2e/framework.go):
++ [E2e Framework](../../test/e2e/framework/framework.go):
Familiarise yourself with this test framework and how to use it.
Amongst others, it automatically creates uniquely named namespaces
within which your tests can run to avoid name clashes, and reliably
@@ -194,7 +194,7 @@ Here are a few pointers:
should always use this framework. Trying other home-grown
approaches to avoiding name clashes and resource leaks has proven
to be a very bad idea.
-+ [E2e utils library](../../test/e2e/util.go):
++ [E2e utils library](../../test/e2e/framework/util.go):
This handy library provides tons of reusable code for a host of
commonly needed test functionality, including waiting for resources
to enter specified states, safely and consistently retrying failed
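After this refactoring the waiting helpers from util.go are exported from the framework package. A hedged sketch of a small wrapper, using the WaitForService and ExpectNoError signatures exactly as they appear elsewhere in this commit (the wrapper name and its timeouts are illustrative):

package e2e

import (
	"time"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForExampleService is a hypothetical helper: it polls until the named
// service exists, using the relocated util.go functions.
func waitForExampleService(c *client.Client, namespace, name string) {
	framework.ExpectNoError(framework.WaitForService(c, namespace, name, true, 2*time.Second, 1*time.Minute))
}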

View File

@@ -79,7 +79,7 @@ pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
test/e2e/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/e2e/downwardapi_volume.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
-test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
+test/e2e/es_cluster_logging.go: framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),

View File

@@ -27,6 +27,7 @@ import (
"golang.org/x/crypto/ssh"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -188,18 +189,18 @@ type stringPair struct {
data, fileName string
}

-var _ = KubeDescribe("Addon update", func() {
+var _ = framework.KubeDescribe("Addon update", func() {

var dir string
var sshClient *ssh.Client
-f := NewDefaultFramework("addon-update-test")
+f := framework.NewDefaultFramework("addon-update-test")

BeforeEach(func() {
// This test requires:
// - SSH master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
-if !providerIs("gce") {
+if !framework.ProviderIs("gce") {
return
}
@@ -210,26 +211,26 @@ var _ = KubeDescribe("Addon update", func() {
// Reduce the addon update intervals so that we have faster response
// to changes in the addon directory.
// do not use "service" command because it clears the environment variables
-switch testContext.OSDistro {
+switch framework.TestContext.OSDistro {
case "debian":
sshExecAndVerify(sshClient, "sudo TEST_ADDON_CHECK_INTERVAL_SEC=1 /etc/init.d/kube-addons restart")
case "trusty":
sshExecAndVerify(sshClient, "sudo initctl restart kube-addons TEST_ADDON_CHECK_INTERVAL_SEC=1")
default:
-Failf("Unsupported OS distro type %s", testContext.OSDistro)
+framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro)
}
})

AfterEach(func() {
if sshClient != nil {
// restart addon_update with the default options
-switch testContext.OSDistro {
+switch framework.TestContext.OSDistro {
case "debian":
sshExec(sshClient, "sudo /etc/init.d/kube-addons restart")
case "trusty":
sshExec(sshClient, "sudo initctl restart kube-addons")
default:
-Failf("Unsupported OS distro type %s", testContext.OSDistro)
+framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro)
}
sshClient.Close()
}
@@ -242,7 +243,7 @@ var _ = KubeDescribe("Addon update", func() {
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
-SkipUnlessProviderIs("gce")
+framework.SkipUnlessProviderIs("gce")

//these tests are long, so I squeezed several cases in one scenario
Expect(sshClient).NotTo(BeNil())
@@ -337,20 +338,20 @@ var _ = KubeDescribe("Addon update", func() {
})

func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
-expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
+framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}

func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
-expectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
+framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}

// TODO marekbiskup 2015-06-11: merge the ssh code into pkg/util/ssh.go after
// kubernetes v1.0 is released. In particular the code of sshExec.
func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
-signer, err := getSigner(testContext.Provider)
+signer, err := framework.GetSigner(framework.TestContext.Provider)
if err != nil {
-return nil, fmt.Errorf("error getting signer for provider %s: '%v'", testContext.Provider, err)
+return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
}

config := &ssh.ClientConfig{
@@ -358,7 +359,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
}

-host := getMasterHost() + ":22"
+host := framework.GetMasterHost() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
@@ -373,7 +374,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
}

func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -405,7 +406,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
}

func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/intstr"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
)
@@ -58,7 +59,7 @@ rc.ConsumeCPU(300)
type ResourceConsumer struct {
name string
kind string
-framework *Framework
+framework *framework.Framework
cpu chan int
mem chan int
customMetric chan int
@@ -72,15 +73,15 @@ type ResourceConsumer struct {
requestSizeCustomMetric int
}

-func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, framework)
+dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
}

// TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
return newResourceConsumer(name, kindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, framework)
+initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
}

/*
@@ -91,13 +92,13 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
*/
func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {

-runServiceAndWorkloadForResourceConsumer(framework.Client, framework.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+runServiceAndWorkloadForResourceConsumer(f.Client, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
rc := &ResourceConsumer{
name: name,
kind: kind,
-framework: framework,
+framework: f,
cpu: make(chan int),
mem: make(chan int),
customMetric: make(chan int),
@@ -121,19 +122,19 @@ func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTo
// ConsumeCPU consumes given number of CPU
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
-Logf("RC %s: consume %v millicores in total", rc.name, millicores)
+framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
rc.cpu <- millicores
}

// ConsumeMem consumes given number of Mem
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
-Logf("RC %s: consume %v MB in total", rc.name, megabytes)
+framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
rc.mem <- megabytes
}

// ConsumeMem consumes given number of custom metric
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
-Logf("RC %s: consume custom metric %v in total", rc.name, amount)
+framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
}
@@ -145,13 +146,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
for {
select {
case millicores := <-rc.cpu:
-Logf("RC %s: consume %v millicores in total", rc.name, millicores)
+framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
if rc.requestSizeInMillicores != 0 {
count = millicores / rc.requestSizeInMillicores
}
rest = millicores - count*rc.requestSizeInMillicores
case <-time.After(sleepTime):
-Logf("RC %s: sending %v requests to consume %v millicores each and 1 request to consume %v millicores", rc.name, count, rc.requestSizeInMillicores, rest)
+framework.Logf("RC %s: sending %v requests to consume %v millicores each and 1 request to consume %v millicores", rc.name, count, rc.requestSizeInMillicores, rest)
if count > 0 {
rc.sendConsumeCPURequests(count, rc.requestSizeInMillicores, rc.consumptionTimeInSeconds)
}
@@ -173,13 +174,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
for {
select {
case megabytes := <-rc.mem:
-Logf("RC %s: consume %v MB in total", rc.name, megabytes)
+framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
if rc.requestSizeInMegabytes != 0 {
count = megabytes / rc.requestSizeInMegabytes
}
rest = megabytes - count*rc.requestSizeInMegabytes
case <-time.After(sleepTime):
-Logf("RC %s: sending %v requests to consume %v MB each and 1 request to consume %v MB", rc.name, count, rc.requestSizeInMegabytes, rest)
+framework.Logf("RC %s: sending %v requests to consume %v MB each and 1 request to consume %v MB", rc.name, count, rc.requestSizeInMegabytes, rest)
if count > 0 {
rc.sendConsumeMemRequests(count, rc.requestSizeInMegabytes, rc.consumptionTimeInSeconds)
}
@@ -201,13 +202,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
for {
select {
case total := <-rc.customMetric:
-Logf("RC %s: consume custom metric %v in total", rc.name, total)
+framework.Logf("RC %s: consume custom metric %v in total", rc.name, total)
if rc.requestSizeInMegabytes != 0 {
count = total / rc.requestSizeCustomMetric
}
rest = total - count*rc.requestSizeCustomMetric
case <-time.After(sleepTime):
-Logf("RC %s: sending %v requests to consume %v custom metric each and 1 request to consume %v",
+framework.Logf("RC %s: sending %v requests to consume %v custom metric each and 1 request to consume %v",
rc.name, count, rc.requestSizeCustomMetric, rest)
if count > 0 {
rc.sendConsumeCustomMetric(count, rc.requestSizeCustomMetric, rc.consumptionTimeInSeconds)
@@ -243,36 +244,36 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(requests, delta, durationSec
// sendOneConsumeCPURequest sends POST request for cpu consumption
func (rc *ResourceConsumer) sendOneConsumeCPURequest(millicores int, durationSec int) {
defer GinkgoRecover()
-proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
-expectNoError(err)
+framework.ExpectNoError(err)
_, err = proxyRequest.Namespace(rc.framework.Namespace.Name).
Name(rc.name).
Suffix("ConsumeCPU").
Param("millicores", strconv.Itoa(millicores)).
Param("durationSec", strconv.Itoa(durationSec)).
DoRaw()
-expectNoError(err)
+framework.ExpectNoError(err)
}

// sendOneConsumeMemRequest sends POST request for memory consumption
func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec int) {
defer GinkgoRecover()
-proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
-expectNoError(err)
+framework.ExpectNoError(err)
_, err = proxyRequest.Namespace(rc.framework.Namespace.Name).
Name(rc.name).
Suffix("ConsumeMem").
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(durationSec)).
DoRaw()
-expectNoError(err)
+framework.ExpectNoError(err)
}

// sendOneConsumeCustomMetric sends POST request for custom metric consumption
func (rc *ResourceConsumer) sendOneConsumeCustomMetric(delta int, durationSec int) {
defer GinkgoRecover()
-proxyRequest, err := getServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
-expectNoError(err)
+framework.ExpectNoError(err)
_, err = proxyRequest.Namespace(rc.framework.Namespace.Name).
Name(rc.name).
Suffix("BumpMetric").
@@ -280,34 +281,34 @@ func (rc *ResourceConsumer) sendOneConsumeCustomMetric(delta int, durationSec in
Param("delta", strconv.Itoa(delta)).
Param("durationSec", strconv.Itoa(durationSec)).
DoRaw()
-expectNoError(err)
+framework.ExpectNoError(err)
}

func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case kindRC:
replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
-expectNoError(err)
+framework.ExpectNoError(err)
if replicationController == nil {
-Failf(rcIsNil)
+framework.Failf(rcIsNil)
}
return replicationController.Status.Replicas
case kindDeployment:
deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name)
-expectNoError(err)
+framework.ExpectNoError(err)
if deployment == nil {
-Failf(deploymentIsNil)
+framework.Failf(deploymentIsNil)
}
return deployment.Status.Replicas
case kindReplicaSet:
rs, err := rc.framework.Client.ReplicaSets(rc.framework.Namespace.Name).Get(rc.name)
-expectNoError(err)
+framework.ExpectNoError(err)
if rs == nil {
-Failf(rsIsNil)
+framework.Failf(rsIsNil)
}
return rs.Status.Replicas
default:
-Failf(invalidKind)
+framework.Failf(invalidKind)
}
return 0
}
@@ -316,24 +317,24 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
timeout := 10 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
if desiredReplicas == rc.GetReplicas() {
-Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas)
+framework.Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas)
return
} else {
-Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas)
+framework.Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas)
}
}
-Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas)
+framework.Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas)
}

func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, timeout time.Duration) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
actual := rc.GetReplicas()
if desiredReplicas != actual {
-Failf("Number of replicas has changed: expected %v, got %v", desiredReplicas, actual)
+framework.Failf("Number of replicas has changed: expected %v, got %v", desiredReplicas, actual)
}
-Logf("Number of replicas is as expected")
+framework.Logf("Number of replicas is as expected")
}
-Logf("Number of replicas was stable over %v", timeout)
+framework.Logf("Number of replicas was stable over %v", timeout)
}

func (rc *ResourceConsumer) CleanUp() {
@@ -343,8 +344,8 @@ func (rc *ResourceConsumer) CleanUp() {
rc.stopCustomMetric <- 0
// Wait some time to ensure all child goroutines are finished.
time.Sleep(10 * time.Second)
-expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
+framework.ExpectNoError(framework.DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
-expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
+framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
}

func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
@@ -364,9 +365,9 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s
},
},
})
-expectNoError(err)
+framework.ExpectNoError(err)

-rcConfig := RCConfig{
+rcConfig := framework.RCConfig{
Client: c,
Image: resourceConsumerImage,
Name: name,
@@ -381,22 +382,22 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s
switch kind {
case kindRC:
-expectNoError(RunRC(rcConfig))
+framework.ExpectNoError(framework.RunRC(rcConfig))
break
case kindDeployment:
-dpConfig := DeploymentConfig{
+dpConfig := framework.DeploymentConfig{
rcConfig,
}
-expectNoError(RunDeployment(dpConfig))
+framework.ExpectNoError(framework.RunDeployment(dpConfig))
break
case kindReplicaSet:
-rsConfig := ReplicaSetConfig{
+rsConfig := framework.ReplicaSetConfig{
rcConfig,
}
-expectNoError(RunReplicaSet(rsConfig))
+framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
break
default:
-Failf(invalidKind)
+framework.Failf(invalidKind)
}

// Make sure endpoints are propagated.

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -43,8 +44,8 @@ const (
v1JobSelectorKey = "job-name"
)

-var _ = KubeDescribe("V1Job", func() {
+var _ = framework.KubeDescribe("V1Job", func() {
-f := NewDefaultFramework("v1job")
+f := framework.NewDefaultFramework("v1job")
parallelism := 2
completions := 4
lotsOfFailures := 5 // more than completions
@@ -105,7 +106,7 @@ var _ = KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring job shows many failures")
-err = wait.Poll(poll, v1JobTimeout, func() (bool, error) {
+err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
curr, err := f.Client.Batch().Jobs(f.Namespace.Name).Get(job.Name)
if err != nil {
return false, err
@@ -274,7 +275,7 @@ func deleteV1Job(c *client.Client, ns, name string) error {
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName}))
-return wait.Poll(poll, v1JobTimeout, func() (bool, error) {
+return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options)
if err != nil {
@@ -292,7 +293,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i
// Wait for job to reach completions.
func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int) error {
-return wait.Poll(poll, v1JobTimeout, func() (bool, error) {
+return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName)
if err != nil {
return false, err
@@ -303,7 +304,7 @@ func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int) e
// Wait for job fail.
func waitForV1JobFail(c *client.Client, ns, jobName string) error {
-return wait.Poll(poll, v1JobTimeout, func() (bool, error) {
+return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName)
if err != nil {
return false, err

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
)
@@ -31,9 +32,9 @@ const (
sleepDuration = 10 * time.Second
)

-var _ = KubeDescribe("Cadvisor", func() {
+var _ = framework.KubeDescribe("Cadvisor", func() {
-f := NewDefaultFramework("cadvisor")
+f := framework.NewDefaultFramework("cadvisor")

It("should be healthy on every node.", func() {
CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)
@@ -44,7 +45,7 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here.
By("getting list of nodes")
nodeList, err := c.Nodes().List(api.ListOptions{})
-expectNoError(err)
+framework.ExpectNoError(err)
var errors []error
retries := maxRetries
for {
@@ -65,8 +66,8 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
if retries--; retries <= 0 {
break
}
-Logf("failed to retrieve kubelet stats -\n %v", errors)
+framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
time.Sleep(sleepDuration)
}
-Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
+framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}

View File

@@ -22,6 +22,7 @@ import (
"time"

"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -37,16 +38,16 @@ const (
// run by default.
//
// These tests take ~20 minutes to run each.
-var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] [Slow]", func() {
+var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling] [Slow]", func() {
-f := NewDefaultFramework("autoscaling")
+f := framework.NewDefaultFramework("autoscaling")
var nodeCount int
var coresPerNode int
var memCapacityMb int

BeforeEach(func() {
-SkipUnlessProviderIs("gce")
+framework.SkipUnlessProviderIs("gce")

-nodes := ListSchedulableNodesOrDie(f.Client)
+nodes := framework.ListSchedulableNodesOrDie(f.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
@@ -64,23 +65,23 @@ var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling]
// Consume 50% CPU
rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0)
-err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
for _, rc := range rcs {
rc.CleanUp()
}
-expectNoError(err)
+framework.ExpectNoError(err)

-expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
})

It("Should scale cluster size based on cpu reservation", func() {
setUpAutoscaler("cpu/node_reservation", 0.5, nodeCount, nodeCount+1)

ReserveCpu(f, "cpu-reservation", 600*nodeCount*coresPerNode)
-expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))

-expectNoError(DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation"))
+framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation"))
-expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
})

It("Should scale cluster size based on memory utilization", func() {
@@ -89,23 +90,23 @@ var _ = KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAutoscaling]
// Consume 60% of total memory capacity
megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica)
-err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
for _, rc := range rcs {
rc.CleanUp()
}
-expectNoError(err)
+framework.ExpectNoError(err)

-expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
})

It("Should scale cluster size based on memory reservation", func() {
setUpAutoscaler("memory/node_reservation", 0.5, nodeCount, nodeCount+1)

ReserveMemory(f, "memory-reservation", nodeCount*memCapacityMb*6/10)
-expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))

-expectNoError(DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
+framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
-expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
})
})
@@ -113,17 +114,17 @@ func setUpAutoscaler(metric string, target float64, min, max int) {
// TODO integrate with kube-up.sh script once it will support autoscaler setup.
By("Setting up autoscaler to scale based on " + metric)
out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "set-autoscaling",
-testContext.CloudConfig.NodeInstanceGroup,
+framework.TestContext.CloudConfig.NodeInstanceGroup,
-"--project="+testContext.CloudConfig.ProjectID,
+"--project="+framework.TestContext.CloudConfig.ProjectID,
-"--zone="+testContext.CloudConfig.Zone,
+"--zone="+framework.TestContext.CloudConfig.Zone,
"--custom-metric-utilization=metric=custom.cloudmonitoring.googleapis.com/kubernetes.io/"+metric+fmt.Sprintf(",utilization-target=%v", target)+",utilization-target-type=GAUGE",
fmt.Sprintf("--min-num-replicas=%v", min),
fmt.Sprintf("--max-num-replicas=%v", max),
).CombinedOutput()
-expectNoError(err, "Output: "+string(out))
+framework.ExpectNoError(err, "Output: "+string(out))
}

-func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer {
+func createConsumingRCs(f *framework.Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer {
var res []*ResourceConsumer
for i := 1; i <= count; i++ {
name := fmt.Sprintf("%s-%d", name, i)
@@ -135,16 +136,16 @@ func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerR
func cleanUpAutoscaler() {
By("Removing autoscaler")
out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "stop-autoscaling",
-testContext.CloudConfig.NodeInstanceGroup,
+framework.TestContext.CloudConfig.NodeInstanceGroup,
-"--project="+testContext.CloudConfig.ProjectID,
+"--project="+framework.TestContext.CloudConfig.ProjectID,
-"--zone="+testContext.CloudConfig.Zone,
+"--zone="+framework.TestContext.CloudConfig.Zone,
).CombinedOutput()
-expectNoError(err, "Output: "+string(out))
+framework.ExpectNoError(err, "Output: "+string(out))
}

-func ReserveCpu(f *Framework, id string, millicores int) {
+func ReserveCpu(f *framework.Framework, id string, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
-config := &RCConfig{
+config := &framework.RCConfig{
Client: f.Client,
Name: id,
Namespace: f.Namespace.Name,
@@ -153,12 +154,12 @@ func ReserveCpu(f *Framework, id string, millicores int) {
Replicas: millicores / 100,
CpuRequest: 100,
}
-expectNoError(RunRC(*config))
+framework.ExpectNoError(framework.RunRC(*config))
}

-func ReserveMemory(f *Framework, id string, megabytes int) {
+func ReserveMemory(f *framework.Framework, id string, megabytes int) {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
-config := &RCConfig{
+config := &framework.RCConfig{
Client: f.Client,
Name: id,
Namespace: f.Namespace.Name,
@@ -167,5 +168,5 @@ func ReserveMemory(f *Framework, id string, megabytes int) {
Replicas: megabytes / 500,
MemRequest: 500 * 1024 * 1024,
}
-expectNoError(RunRC(*config))
+framework.ExpectNoError(framework.RunRC(*config))
}

View File

@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
+"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -39,7 +40,7 @@ import (
// realVersion turns a version constant s into a version string deployable on
// GKE. See hack/get-build.sh for more information.
func realVersion(s string) (string, error) {
-v, _, err := runCmd(path.Join(testContext.RepoRoot, "hack/get-build.sh"), "-v", s)
+v, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
if err != nil {
return v, err
}
@@ -49,46 +50,46 @@ func realVersion(s string) (string, error) {
// The following upgrade functions are passed into the framework below and used
// to do the actual upgrades.
var masterUpgrade = func(v string) error {
-switch testContext.Provider {
+switch framework.TestContext.Provider {
case "gce":
return masterUpgradeGCE(v)
case "gke":
return masterUpgradeGKE(v)
default:
-return fmt.Errorf("masterUpgrade() is not implemented for provider %s", testContext.Provider)
+return fmt.Errorf("masterUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
}
}

func masterUpgradeGCE(rawV string) error {
v := "v" + rawV
-_, _, err := runCmd(path.Join(testContext.RepoRoot, "cluster/gce/upgrade.sh"), "-M", v)
+_, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-M", v)
return err
}

func masterUpgradeGKE(v string) error {
-Logf("Upgrading master to %q", v)
+framework.Logf("Upgrading master to %q", v)
_, _, err := runCmd("gcloud", "container",
-fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
+fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
+fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone),
"clusters",
"upgrade",
-testContext.CloudConfig.Cluster,
+framework.TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet")
return err
}

-var nodeUpgrade = func(f *Framework, replicas int, v string) error {
+var nodeUpgrade = func(f *framework.Framework, replicas int, v string) error {
// Perform the upgrade.
var err error
-switch testContext.Provider {
+switch framework.TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v)
case "gke":
err = nodeUpgradeGKE(v)
default:
-err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", testContext.Provider)
+err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
}
if err != nil {
return err
@@ -98,12 +99,12 @@ var nodeUpgrade = func(f *Framework, replicas int, v string) error {
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
-Logf("Waiting up to %v for all nodes to be ready after the upgrade", restartNodeReadyAgainTimeout)
+framework.Logf("Waiting up to %v for all nodes to be ready after the upgrade", restartNodeReadyAgainTimeout)
-if _, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, testContext.CloudConfig.NumNodes); err != nil {
+if _, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, framework.TestContext.CloudConfig.NumNodes); err != nil {
return err
}

-Logf("Waiting up to %v for all pods to be running and ready after the upgrade", restartPodReadyAgainTimeout)
+framework.Logf("Waiting up to %v for all pods to be running and ready after the upgrade", restartPodReadyAgainTimeout)
-return waitForPodsRunningReady(f.Namespace.Name, replicas, restartPodReadyAgainTimeout)
+return framework.WaitForPodsRunningReady(f.Namespace.Name, replicas, restartPodReadyAgainTimeout)
}

func nodeUpgradeGCE(rawV string) error {
@@ -111,21 +112,21 @@ func nodeUpgradeGCE(rawV string) error {
// would trigger a node update; right now it's very different.
v := "v" + rawV

-Logf("Getting the node template before the upgrade")
+framework.Logf("Getting the node template before the upgrade")
tmplBefore, err := migTemplate()
if err != nil {
return fmt.Errorf("error getting the node template before the upgrade: %v", err)
}

-Logf("Preparing node upgrade by creating new instance template for %q", v)
+framework.Logf("Preparing node upgrade by creating new instance template for %q", v)
-stdout, _, err := runCmd(path.Join(testContext.RepoRoot, "cluster/gce/upgrade.sh"), "-P", v)
+stdout, _, err := runCmd(path.Join(framework.TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-P", v)
if err != nil {
cleanupNodeUpgradeGCE(tmplBefore)
return fmt.Errorf("error preparing node upgrade: %v", err)
}
tmpl := strings.TrimSpace(stdout)

-Logf("Performing a node upgrade to %q; waiting at most %v per node", tmpl, restartPerNodeTimeout)
+framework.Logf("Performing a node upgrade to %q; waiting at most %v per node", tmpl, restartPerNodeTimeout)
if err := migRollingUpdate(tmpl, restartPerNodeTimeout); err != nil {
cleanupNodeUpgradeGCE(tmplBefore)
return fmt.Errorf("error doing node upgrade via a migRollingUpdate to %s: %v", tmpl, err)
@@ -134,42 +135,42 @@ func nodeUpgradeGCE(rawV string) error {
}

func cleanupNodeUpgradeGCE(tmplBefore string) {
-Logf("Cleaning up any unused node templates")
+framework.Logf("Cleaning up any unused node templates")
tmplAfter, err := migTemplate()
if err != nil {
-Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
+framework.Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
return
}
if tmplBefore == tmplAfter {
// The node upgrade failed so there's no need to delete
// anything.
-Logf("Node template %s is still in use; not cleaning up", tmplBefore)
+framework.Logf("Node template %s is still in use; not cleaning up", tmplBefore)
return
}
-Logf("Deleting node template %s", tmplBefore)
+framework.Logf("Deleting node template %s", tmplBefore)
if _, _, err := retryCmd("gcloud", "compute", "instance-templates",
-fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
+fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"delete",
tmplBefore); err != nil {
-Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
+framework.Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
-Logf("May have leaked instance template %q", tmplBefore)
+framework.Logf("May have leaked instance template %q", tmplBefore)
}
}

func nodeUpgradeGKE(v string) error {
-Logf("Upgrading nodes to %q", v)
+framework.Logf("Upgrading nodes to %q", v)
_, _, err := runCmd("gcloud", "container",
-fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
+fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
+fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone),
"clusters",
"upgrade",
-testContext.CloudConfig.Cluster,
+framework.TestContext.CloudConfig.Cluster,
fmt.Sprintf("--cluster-version=%s", v),
"--quiet")
return err
}

-var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() {
+var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {

svcName, replicas := "baz", 2
var rcName, ip, v string
@@ -179,14 +180,14 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() {
// The version is determined once at the beginning of the test so that
// the master and nodes won't be skewed if the value changes during the
// test.
-By(fmt.Sprintf("Getting real version for %q", testContext.UpgradeTarget))
+By(fmt.Sprintf("Getting real version for %q", framework.TestContext.UpgradeTarget))
var err error
-v, err = realVersion(testContext.UpgradeTarget)
+v, err = realVersion(framework.TestContext.UpgradeTarget)
-expectNoError(err)
+framework.ExpectNoError(err)
-Logf("Version for %q is %q", testContext.UpgradeTarget, v)
+framework.Logf("Version for %q is %q", framework.TestContext.UpgradeTarget, v)
})

-f := NewDefaultFramework("cluster-upgrade")
+f := framework.NewDefaultFramework("cluster-upgrade")
var w *ServiceTestFixture
BeforeEach(func() {
By("Setting up the service, RC, and pods")
@@ -202,10 +203,10 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() {
Expect(err).NotTo(HaveOccurred())
ingresses := result.Status.LoadBalancer.Ingress
if len(ingresses) != 1 {
-Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result)
+framework.Failf("Was expecting only 1 ingress IP but got %d (%v): %v", len(ingresses), ingresses, result)
}
ingress = ingresses[0]
-Logf("Got load balancer ingress point %v", ingress)
+framework.Logf("Got load balancer ingress point %v", ingress)
ip = ingress.IP
if ip == "" {
ip = ingress.Hostname
@@ -222,98 +223,98 @@ var _ = KubeDescribe("Upgrade [Feature:Upgrade]", func() {
w.Cleanup()
})

-KubeDescribe("master upgrade", func() {
+framework.KubeDescribe("master upgrade", func() {
It("should maintain responsive services [Feature:MasterUpgrade]", func() {
By("Validating cluster before master upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
testUpgrade(ip, v, masterUpgrade)
By("Checking master version")
-expectNoError(checkMasterVersion(f.Client, v))
+framework.ExpectNoError(checkMasterVersion(f.Client, v))
By("Validating cluster after master upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
})
})

-KubeDescribe("node upgrade", func() {
+framework.KubeDescribe("node upgrade", func() {
It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
By("Validating cluster before node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a node upgrade")
// Circumnavigate testUpgrade, since services don't necessarily stay up.
-Logf("Starting upgrade")
+framework.Logf("Starting upgrade")
-expectNoError(nodeUpgrade(f, replicas, v))
+framework.ExpectNoError(nodeUpgrade(f, replicas, v))
-Logf("Upgrade complete")
+framework.Logf("Upgrade complete")
By("Checking node versions")
-expectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.Client, v))
By("Validating cluster after node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
})

It("should maintain responsive services [Feature:ExperimentalNodeUpgrade]", func() {
By("Validating cluster before node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a node upgrade")
testUpgrade(ip, v, func(v string) error {
return nodeUpgrade(f, replicas, v)
})
By("Checking node versions")
-expectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.Client, v))
By("Validating cluster after node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
})
})

-KubeDescribe("cluster upgrade", func() {
+framework.KubeDescribe("cluster upgrade", func() {
It("should maintain responsive services [Feature:ClusterUpgrade]", func() {
By("Validating cluster before master upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
testUpgrade(ip, v, masterUpgrade)
By("Checking master version")
-expectNoError(checkMasterVersion(f.Client, v))
+framework.ExpectNoError(checkMasterVersion(f.Client, v))
By("Validating cluster after master upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))

By("Validating cluster before node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a node upgrade")
// Circumnavigate testUpgrade, since services don't necessarily stay up.
-Logf("Starting upgrade")
+framework.Logf("Starting upgrade")
-expectNoError(nodeUpgrade(f, replicas, v))
+framework.ExpectNoError(nodeUpgrade(f, replicas, v))
-Logf("Upgrade complete")
+framework.Logf("Upgrade complete")
By("Checking node versions")
-expectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.Client, v))
By("Validating cluster after node upgrade")
-expectNoError(validate(f, svcName, rcName, ingress, replicas))
+framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
}) })
It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() { It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() {
By("Validating cluster before master upgrade") By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas)) framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade") By("Performing a master upgrade")
testUpgrade(ip, v, masterUpgrade) testUpgrade(ip, v, masterUpgrade)
By("Checking master version") By("Checking master version")
expectNoError(checkMasterVersion(f.Client, v)) framework.ExpectNoError(checkMasterVersion(f.Client, v))
By("Validating cluster after master upgrade") By("Validating cluster after master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas)) framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Validating cluster before node upgrade") By("Validating cluster before node upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas)) framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a node upgrade") By("Performing a node upgrade")
testUpgrade(ip, v, func(v string) error { testUpgrade(ip, v, func(v string) error {
return nodeUpgrade(f, replicas, v) return nodeUpgrade(f, replicas, v)
}) })
By("Checking node versions") By("Checking node versions")
expectNoError(checkNodesVersions(f.Client, v)) framework.ExpectNoError(checkNodesVersions(f.Client, v))
By("Validating cluster after node upgrade") By("Validating cluster after node upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas)) framework.ExpectNoError(validate(f, svcName, rcName, ingress, replicas))
}) })
}) })
}) })
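The hunks above are representative of the whole refactor: helpers that used to be package-level in test/e2e (Logf, Failf, expectNoError, KubeDescribe, NewDefaultFramework) are now exported from test/e2e/framework. A minimal sketch of the resulting test skeleton; the spec name and assertions here are illustrative, not part of this commit:

package e2e

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Example", func() {
	// The framework creates and tears down a uniquely named namespace per spec.
	f := framework.NewDefaultFramework("example")

	It("should use the shared helpers", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		framework.ExpectNoError(err)
		framework.Logf("found %d pods in %s", len(pods.Items), f.Namespace.Name)
	})
})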
func testUpgrade(ip, v string, upF func(v string) error) { func testUpgrade(ip, v string, upF func(v string) error) {
Logf("Starting async validation") framework.Logf("Starting async validation")
httpClient := http.Client{Timeout: 2 * time.Second} httpClient := http.Client{Timeout: 2 * time.Second}
done := make(chan struct{}, 1) done := make(chan struct{}, 1)
// Let's make sure we've finished the heartbeat before shutting things down. // Let's make sure we've finished the heartbeat before shutting things down.
@ -323,14 +324,14 @@ func testUpgrade(ip, v string, upF func(v string) error) {
wg.Add(1) wg.Add(1)
defer wg.Done() defer wg.Done()
if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) { if err := wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
r, err := httpClient.Get("http://" + ip) r, err := httpClient.Get("http://" + ip)
if err != nil { if err != nil {
Logf("Error reaching %s: %v", ip, err) framework.Logf("Error reaching %s: %v", ip, err)
return false, nil return false, nil
} }
if r.StatusCode < http.StatusOK || r.StatusCode >= http.StatusNotFound { if r.StatusCode < http.StatusOK || r.StatusCode >= http.StatusNotFound {
Logf("Bad response; status: %d, response: %v", r.StatusCode, r) framework.Logf("Bad response; status: %d, response: %v", r.StatusCode, r)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -340,17 +341,17 @@ func testUpgrade(ip, v string, upF func(v string) error) {
// a failure is very confusing to track down because from the logs // a failure is very confusing to track down because from the logs
// everything looks fine. // everything looks fine.
msg := fmt.Sprintf("Failed to contact service during upgrade: %v", err) msg := fmt.Sprintf("Failed to contact service during upgrade: %v", err)
Logf(msg) framework.Logf(msg)
Failf(msg) framework.Failf(msg)
} }
}, 200*time.Millisecond, done) }, 200*time.Millisecond, done)
Logf("Starting upgrade") framework.Logf("Starting upgrade")
expectNoError(upF(v)) framework.ExpectNoError(upF(v))
done <- struct{}{} done <- struct{}{}
Logf("Stopping async validation") framework.Logf("Stopping async validation")
wg.Wait() wg.Wait()
Logf("Upgrade complete") framework.Logf("Upgrade complete")
} }
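Parts of testUpgrade's polling goroutine are elided by the hunk above; the shape it relies on is "validate in the background until the upgrade returns, then stop via a channel". A stripped-down, standalone sketch of that shape, using plain net/http and log with no framework dependencies:

package sketch

import (
	"log"
	"net/http"
	"sync"
	"time"
)

// pollWhile keeps hitting url while op runs, then signals the poller to stop
// and waits for it to exit, mirroring the done-channel/WaitGroup pattern above.
func pollWhile(op func() error, url string) error {
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		client := http.Client{Timeout: 2 * time.Second}
		for {
			select {
			case <-done:
				return
			default:
			}
			if r, err := client.Get(url); err != nil {
				log.Printf("error reaching %s: %v", url, err)
			} else {
				r.Body.Close()
			}
			time.Sleep(200 * time.Millisecond)
		}
	}()
	err := op()
	close(done)
	wg.Wait()
	return err
}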
func checkMasterVersion(c *client.Client, want string) error { func checkMasterVersion(c *client.Client, want string) error {
@ -366,12 +367,12 @@ func checkMasterVersion(c *client.Client, want string) error {
return fmt.Errorf("master had kube-apiserver version %s which does not start with %s", return fmt.Errorf("master had kube-apiserver version %s which does not start with %s",
got, want) got, want)
} }
Logf("Master is at version %s", want) framework.Logf("Master is at version %s", want)
return nil return nil
} }
func checkNodesVersions(c *client.Client, want string) error { func checkNodesVersions(c *client.Client, want string) error {
l := ListSchedulableNodesOrDie(c) l := framework.ListSchedulableNodesOrDie(c)
for _, n := range l.Items { for _, n := range l.Items {
// We do prefix trimming and then matching because: // We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4 // want looks like: 0.19.3-815-g50e67d4
@ -390,15 +391,15 @@ func checkNodesVersions(c *client.Client, want string) error {
return nil return nil
} }
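The comparison itself is elided by the hunk, but the comment explains the intent: want is a bare version string like 0.19.3-815-g50e67d4 while a node reports a prefixed variant, so the check trims and then prefix-matches. A hypothetical illustration of that idea, not the code from this file:

package sketch

import "strings"

// versionMatches trims the leading "v" from what a node reports
// (e.g. "v0.19.3-815-g50e67d4-dirty") and prefix-matches it against the
// wanted version ("0.19.3-815-g50e67d4").
func versionMatches(got, want string) bool {
	return strings.HasPrefix(strings.TrimPrefix(got, "v"), want)
}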
// retryCmd runs cmd using args and retries it for up to singleCallTimeout if // retryCmd runs cmd using args and retries it for up to framework.SingleCallTimeout if
// it returns an error. It returns stdout and stderr. // it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) { func retryCmd(command string, args ...string) (string, string, error) {
var err error var err error
stdout, stderr := "", "" stdout, stderr := "", ""
wait.Poll(poll, singleCallTimeout, func() (bool, error) { wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = runCmd(command, args...) stdout, stderr, err = runCmd(command, args...)
if err != nil { if err != nil {
Logf("Got %v", err) framework.Logf("Got %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -412,7 +413,7 @@ func retryCmd(command string, args ...string) (string, string, error) {
// TODO(ihmccreery) This function should either be moved into util.go or // TODO(ihmccreery) This function should either be moved into util.go or
// removed; other e2e's use bare exe.Command. // removed; other e2e's use bare exe.Command.
func runCmd(command string, args ...string) (string, string, error) { func runCmd(command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args) framework.Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer var bout, berr bytes.Buffer
cmd := exec.Command(command, args...) cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd // We also output to the OS stdout/stderr to aid in debugging in case cmd
@ -428,8 +429,8 @@ func runCmd(command string, args ...string) (string, string, error) {
return stdout, stderr, nil return stdout, stderr, nil
} }
func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error { func validate(f *framework.Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
Logf("Beginning cluster validation") framework.Logf("Beginning cluster validation")
// Verify RC. // Verify RC.
rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(api.ListOptions{}) rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(api.ListOptions{})
if err != nil { if err != nil {
@ -443,7 +444,7 @@ func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBala
} }
// Verify pods. // Verify pods.
if err := verifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil { if err := framework.VerifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil {
return fmt.Errorf("failed to find %d %q pods: %v", podsWant, rcNameWant, err) return fmt.Errorf("failed to find %d %q pods: %v", podsWant, rcNameWant, err)
} }
@ -458,7 +459,7 @@ func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBala
// TODO(mikedanese): Make testLoadBalancerReachable return an error. // TODO(mikedanese): Make testLoadBalancerReachable return an error.
testLoadBalancerReachable(ingress, 80) testLoadBalancerReachable(ingress, 80)
Logf("Cluster validation succeeded") framework.Logf("Cluster validation succeeded")
return nil return nil
} }
@ -486,15 +487,15 @@ func migTemplate() (string, error) {
var errLast error var errLast error
var templ string var templ string
key := "instanceTemplate" key := "instanceTemplate"
if wait.Poll(poll, singleCallTimeout, func() (bool, error) { if wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
// TODO(mikedanese): make this hit the compute API directly instead of // TODO(mikedanese): make this hit the compute API directly instead of
// shelling out to gcloud. // shelling out to gcloud.
// An `instance-groups managed describe` call outputs what we want to stdout. // An `instance-groups managed describe` call outputs what we want to stdout.
output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed", output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"describe", "describe",
fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone),
testContext.CloudConfig.NodeInstanceGroup) framework.TestContext.CloudConfig.NodeInstanceGroup)
if err != nil { if err != nil {
errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err) errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
return false, nil return false, nil
@ -503,10 +504,10 @@ func migTemplate() (string, error) {
// The 'describe' call probably succeeded; parse the output and try to // The 'describe' call probably succeeded; parse the output and try to
// find the line that looks like "instanceTemplate: url/to/<templ>" and // find the line that looks like "instanceTemplate: url/to/<templ>" and
// return <templ>. // return <templ>.
if val := parseKVLines(output, key); len(val) > 0 { if val := framework.ParseKVLines(output, key); len(val) > 0 {
url := strings.Split(val, "/") url := strings.Split(val, "/")
templ = url[len(url)-1] templ = url[len(url)-1]
Logf("MIG group %s using template: %s", testContext.CloudConfig.NodeInstanceGroup, templ) framework.Logf("MIG group %s using template: %s", framework.TestContext.CloudConfig.NodeInstanceGroup, templ)
return true, nil return true, nil
} }
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output) errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
@ -524,7 +525,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
var errLast error var errLast error
var id string var id string
prefix, suffix := "Started [", "]." prefix, suffix := "Started [", "]."
if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) { if err := wait.Poll(framework.Poll, framework.SingleCallTimeout, func() (bool, error) {
// TODO(mikedanese): make this hit the compute API directly instead of // TODO(mikedanese): make this hit the compute API directly instead of
// shelling out to gcloud. // shelling out to gcloud.
// NOTE(mikedanese): If you are changing this gcloud command, update // NOTE(mikedanese): If you are changing this gcloud command, update
@ -532,11 +533,11 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
// A `rolling-updates start` call outputs what we want to stderr. // A `rolling-updates start` call outputs what we want to stderr.
_, output, err := retryCmd("gcloud", "alpha", "compute", _, output, err := retryCmd("gcloud", "alpha", "compute",
"rolling-updates", "rolling-updates",
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone),
"start", "start",
// Required args. // Required args.
fmt.Sprintf("--group=%s", testContext.CloudConfig.NodeInstanceGroup), fmt.Sprintf("--group=%s", framework.TestContext.CloudConfig.NodeInstanceGroup),
fmt.Sprintf("--template=%s", templ), fmt.Sprintf("--template=%s", templ),
// Optional args to fine-tune behavior. // Optional args to fine-tune behavior.
fmt.Sprintf("--instance-startup-timeout=%ds", int(nt.Seconds())), fmt.Sprintf("--instance-startup-timeout=%ds", int(nt.Seconds())),
@ -560,7 +561,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
} }
url := strings.Split(strings.TrimSuffix(strings.TrimPrefix(line, prefix), suffix), "/") url := strings.Split(strings.TrimSuffix(strings.TrimPrefix(line, prefix), suffix), "/")
id = url[len(url)-1] id = url[len(url)-1]
Logf("Started MIG rolling update; ID: %s", id) framework.Logf("Started MIG rolling update; ID: %s", id)
return true, nil return true, nil
} }
errLast = fmt.Errorf("couldn't find line like '%s ... %s' in output to MIG rolling-update start. Output: %s", errLast = fmt.Errorf("couldn't find line like '%s ... %s' in output to MIG rolling-update start. Output: %s",
@ -578,42 +579,42 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
func migRollingUpdatePoll(id string, nt time.Duration) error { func migRollingUpdatePoll(id string, nt time.Duration) error {
// Two keys and a val. // Two keys and a val.
status, progress, done := "status", "statusMessage", "ROLLED_OUT" status, progress, done := "status", "statusMessage", "ROLLED_OUT"
start, timeout := time.Now(), nt*time.Duration(testContext.CloudConfig.NumNodes) start, timeout := time.Now(), nt*time.Duration(framework.TestContext.CloudConfig.NumNodes)
var errLast error var errLast error
Logf("Waiting up to %v for MIG rolling update to complete.", timeout) framework.Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
if wait.Poll(restartPoll, timeout, func() (bool, error) { if wait.Poll(restartPoll, timeout, func() (bool, error) {
// A `rolling-updates describe` call outputs what we want to stdout. // A `rolling-updates describe` call outputs what we want to stdout.
output, _, err := retryCmd("gcloud", "alpha", "compute", output, _, err := retryCmd("gcloud", "alpha", "compute",
"rolling-updates", "rolling-updates",
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID), fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone), fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone),
"describe", "describe",
id) id)
if err != nil { if err != nil {
errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err) errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err)
Logf("%v", errLast) framework.Logf("%v", errLast)
return false, nil return false, nil
} }
// The 'describe' call probably succeeded; parse the output and try to // The 'describe' call probably succeeded; parse the output and try to
// find the line that looks like "status: <status>" and see whether it's // find the line that looks like "status: <status>" and see whether it's
// done. // done.
Logf("Waiting for MIG rolling update: %s (%v elapsed)", framework.Logf("Waiting for MIG rolling update: %s (%v elapsed)",
parseKVLines(output, progress), time.Since(start)) framework.ParseKVLines(output, progress), time.Since(start))
if st := parseKVLines(output, status); st == done { if st := framework.ParseKVLines(output, status); st == done {
return true, nil return true, nil
} }
return false, nil return false, nil
}) != nil { }) != nil {
return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. Last error: %v", timeout, errLast) return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. Last error: %v", timeout, errLast)
} }
Logf("MIG rolling update complete after %v", time.Since(start)) framework.Logf("MIG rolling update complete after %v", time.Since(start))
return nil return nil
} }
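Both MIG helpers lean on framework.ParseKVLines to pull a single value out of gcloud's "key: value" output. A rough stand-in for the behavior those call sites assume; this is a sketch, not the framework implementation:

package main

import (
	"fmt"
	"strings"
)

// parseKVLines scans "key: value" lines and returns the value for key, which is
// what migTemplate and migRollingUpdatePoll above expect from framework.ParseKVLines.
func parseKVLines(output, key string) string {
	for _, line := range strings.Split(output, "\n") {
		kv := strings.SplitN(strings.TrimSpace(line), ":", 2)
		if len(kv) == 2 && strings.TrimSpace(kv[0]) == key {
			return strings.TrimSpace(kv[1])
		}
	}
	return ""
}

func main() {
	out := "instanceTemplate: https://compute/.../instanceTemplates/my-templ\nstatus: ROLLED_OUT\n"
	templURL := parseKVLines(out, "instanceTemplate")
	parts := strings.Split(templURL, "/")
	fmt.Println(parts[len(parts)-1])         // my-templ, as in migTemplate
	fmt.Println(parseKVLines(out, "status")) // ROLLED_OUT, as in migRollingUpdatePoll
}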
func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool { func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
loadBalancerLagTimeout := loadBalancerLagTimeoutDefault loadBalancerLagTimeout := loadBalancerLagTimeoutDefault
if providerIs("aws") { if framework.ProviderIs("aws") {
loadBalancerLagTimeout = loadBalancerLagTimeoutAWS loadBalancerLagTimeout = loadBalancerLagTimeoutAWS
} }
return testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeout) return testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeout)
@ -637,7 +638,7 @@ func conditionFuncDecorator(ip string, port int, fn func(string, int, string, st
func testReachableInTime(testFunc wait.ConditionFunc, timeout time.Duration) bool { func testReachableInTime(testFunc wait.ConditionFunc, timeout time.Duration) bool {
By(fmt.Sprintf("Waiting up to %v", timeout)) By(fmt.Sprintf("Waiting up to %v", timeout))
err := wait.PollImmediate(poll, timeout, testFunc) err := wait.PollImmediate(framework.Poll, timeout, testFunc)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred(), "Error waiting") Expect(err).NotTo(HaveOccurred(), "Error waiting")
return false return false
@ -655,14 +656,14 @@ func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(3 * time.Second) {
service, err := c.Services(namespace).Get(serviceName) service, err := c.Services(namespace).Get(serviceName)
if err != nil { if err != nil {
Logf("Get service failed, ignoring for 5s: %v", err) framework.Logf("Get service failed, ignoring for 5s: %v", err)
continue continue
} }
if len(service.Status.LoadBalancer.Ingress) > 0 { if len(service.Status.LoadBalancer.Ingress) > 0 {
return service, nil return service, nil
} }
if i%5 == 0 { if i%5 == 0 {
Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start)) framework.Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
} }
i++ i++
} }
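waitForLoadBalancerIngress above polls by hand with a for/Sleep loop; the same check can be phrased with wait.PollImmediate, which this file already uses for reachability. A sketch of that alternative, not part of this commit:

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForIngressPoll waits until the service reports at least one load
// balancer ingress point, or the timeout expires.
func waitForIngressPoll(c *client.Client, name, ns string, timeout time.Duration) (*api.Service, error) {
	var svc *api.Service
	err := wait.PollImmediate(3*time.Second, timeout, func() (bool, error) {
		s, err := c.Services(ns).Get(name)
		if err != nil {
			framework.Logf("Get service %s failed, retrying: %v", name, err)
			return false, nil
		}
		if len(s.Status.LoadBalancer.Ingress) > 0 {
			svc = s
			return true, nil
		}
		return false, nil
	})
	return svc, err
}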


@ -22,14 +22,15 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("ConfigMap", func() { var _ = framework.KubeDescribe("ConfigMap", func() {
f := NewDefaultFramework("configmap") f := framework.NewDefaultFramework("configmap")
It("should be consumable from pods in volume [Conformance]", func() { It("should be consumable from pods in volume [Conformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0) doConfigMapE2EWithoutMappings(f, 0, 0)
@ -82,12 +83,12 @@ var _ = KubeDescribe("ConfigMap", func() {
defer func() { defer func() {
By("Cleaning up the configMap") By("Cleaning up the configMap")
if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil {
Failf("unable to delete configMap %v: %v", configMap.Name, err) framework.Failf("unable to delete configMap %v: %v", configMap.Name, err)
} }
}() }()
var err error var err error
if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -133,13 +134,13 @@ var _ = KubeDescribe("ConfigMap", func() {
_, err = f.Client.Pods(f.Namespace.Name).Create(pod) _, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
pollLogs := func() (string, error) { pollLogs := func() (string, error) {
return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
} }
Eventually(pollLogs, podLogTimeout, poll).Should(ContainSubstring("value-1")) Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name)) By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update configMap.ResourceVersion = "" // to force update
@ -148,7 +149,7 @@ var _ = KubeDescribe("ConfigMap", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("waiting to observe update in volume") By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, poll).Should(ContainSubstring("value-2")) Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
}) })
It("should be consumable via environment variable [Conformance]", func() { It("should be consumable via environment variable [Conformance]", func() {
@ -158,12 +159,12 @@ var _ = KubeDescribe("ConfigMap", func() {
defer func() { defer func() {
By("Cleaning up the configMap") By("Cleaning up the configMap")
if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil {
Failf("unable to delete configMap %v: %v", configMap.Name, err) framework.Failf("unable to delete configMap %v: %v", configMap.Name, err)
} }
}() }()
var err error var err error
if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -195,13 +196,13 @@ var _ = KubeDescribe("ConfigMap", func() {
}, },
} }
testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{
"CONFIG_DATA_1=value-1", "CONFIG_DATA_1=value-1",
}, f.Namespace.Name) }, f.Namespace.Name)
}) })
}) })
func newConfigMap(f *Framework, name string) *api.ConfigMap { func newConfigMap(f *framework.Framework, name string) *api.ConfigMap {
return &api.ConfigMap{ return &api.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
@ -215,7 +216,7 @@ func newConfigMap(f *Framework, name string) *api.ConfigMap {
} }
} }
func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) { func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64) {
var ( var (
name = "configmap-test-volume-" + string(util.NewUUID()) name = "configmap-test-volume-" + string(util.NewUUID())
volumeName = "configmap-volume" volumeName = "configmap-volume"
@ -227,12 +228,12 @@ func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) {
defer func() { defer func() {
By("Cleaning up the configMap") By("Cleaning up the configMap")
if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil {
Failf("unable to delete configMap %v: %v", configMap.Name, err) framework.Failf("unable to delete configMap %v: %v", configMap.Name, err)
} }
}() }()
var err error var err error
if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -279,13 +280,13 @@ func doConfigMapE2EWithoutMappings(f *Framework, uid, fsGroup int64) {
pod.Spec.SecurityContext.FSGroup = &fsGroup pod.Spec.SecurityContext.FSGroup = &fsGroup
} }
testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1", "content of file \"/etc/configmap-volume/data-1\": value-1",
}, f.Namespace.Name) }, f.Namespace.Name)
} }
func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) { func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64) {
var ( var (
name = "configmap-test-volume-map-" + string(util.NewUUID()) name = "configmap-test-volume-map-" + string(util.NewUUID())
volumeName = "configmap-volume" volumeName = "configmap-volume"
@ -297,12 +298,12 @@ func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) {
defer func() { defer func() {
By("Cleaning up the configMap") By("Cleaning up the configMap")
if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil { if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil {
Failf("unable to delete configMap %v: %v", configMap.Name, err) framework.Failf("unable to delete configMap %v: %v", configMap.Name, err)
} }
}() }()
var err error var err error
if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
Failf("unable to create test configMap %s: %v", configMap.Name, err) framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -355,7 +356,7 @@ func doConfigMapE2EWithMappings(f *Framework, uid, fsGroup int64) {
pod.Spec.SecurityContext.FSGroup = &fsGroup pod.Spec.SecurityContext.FSGroup = &fsGroup
} }
testContainerOutput("consume configMaps", f.Client, pod, 0, []string{ framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2", "content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
}, f.Namespace.Name) }, f.Namespace.Name)
} }
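The pod specs in both helpers are largely elided by the hunks; the meaningful difference between them is the ConfigMap volume source, which is why the mapped variant reads /etc/configmap-volume/path/to/data-2. Roughly, assuming the v1.2-era api types (a sketch, not lines from this commit):

package e2e

import "k8s.io/kubernetes/pkg/api"

// Without Items every key in the ConfigMap is projected under its own name;
// with Items only the listed keys appear, at the paths given here.
var mappedConfigMapVolume = api.Volume{
	Name: "configmap-volume",
	VolumeSource: api.VolumeSource{
		ConfigMap: &api.ConfigMapVolumeSource{
			LocalObjectReference: api.LocalObjectReference{Name: "configmap-test-volume-map-example"},
			Items: []api.KeyToPath{
				{Key: "data-2", Path: "path/to/data-2"},
			},
		},
	},
}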


@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -35,49 +36,49 @@ const (
probTestInitialDelaySeconds = 30 probTestInitialDelaySeconds = 30
) )
var _ = KubeDescribe("Probing container", func() { var _ = framework.KubeDescribe("Probing container", func() {
framework := NewDefaultFramework("container-probe") f := framework.NewDefaultFramework("container-probe")
var podClient client.PodInterface var podClient client.PodInterface
probe := webserverProbeBuilder{} probe := webserverProbeBuilder{}
BeforeEach(func() { BeforeEach(func() {
podClient = framework.Client.Pods(framework.Namespace.Name) podClient = f.Client.Pods(f.Namespace.Name)
}) })
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() { It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil)) p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
expectNoError(err) framework.ExpectNoError(err)
Expect(wait.Poll(poll, 240*time.Second, func() (bool, error) { Expect(wait.Poll(framework.Poll, 240*time.Second, func() (bool, error) {
p, err := podClient.Get(p.Name) p, err := podClient.Get(p.Name)
if err != nil { if err != nil {
return false, err return false, err
} }
ready := api.IsPodReady(p) ready := api.IsPodReady(p)
if !ready { if !ready {
Logf("pod is not yet ready; pod has phase %q.", p.Status.Phase) framework.Logf("pod is not yet ready; pod has phase %q.", p.Status.Phase)
return false, nil return false, nil
} }
return true, nil return true, nil
})).NotTo(HaveOccurred(), "pod never became ready") })).NotTo(HaveOccurred(), "pod never became ready")
p, err = podClient.Get(p.Name) p, err = podClient.Get(p.Name)
expectNoError(err) framework.ExpectNoError(err)
isReady, err := podRunningReady(p) isReady, err := framework.PodRunningReady(p)
expectNoError(err) framework.ExpectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready") Expect(isReady).To(BeTrue(), "pod should be ready")
// We assume the pod became ready when the container became ready. This // We assume the pod became ready when the container became ready. This
// is true for a single container pod. // is true for a single container pod.
readyTime, err := getTransitionTimeForReadyCondition(p) readyTime, err := getTransitionTimeForReadyCondition(p)
expectNoError(err) framework.ExpectNoError(err)
startedTime, err := getContainerStartedTime(p, probTestContainerName) startedTime, err := getContainerStartedTime(p, probTestContainerName)
expectNoError(err) framework.ExpectNoError(err)
Logf("Container started at %v, pod became ready at %v", startedTime, readyTime) framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
initialDelay := probTestInitialDelaySeconds * time.Second initialDelay := probTestInitialDelaySeconds * time.Second
if readyTime.Sub(startedTime) < initialDelay { if readyTime.Sub(startedTime) < initialDelay {
Failf("Pod became ready before it's %v initial delay", initialDelay) framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
} }
restartCount := getRestartCount(p) restartCount := getRestartCount(p)
@ -86,9 +87,9 @@ var _ = KubeDescribe("Probing container", func() {
It("with readiness probe that fails should never be ready and never restart [Conformance]", func() { It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
p, err := podClient.Create(makePodSpec(probe.withFailing().build(), nil)) p, err := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
expectNoError(err) framework.ExpectNoError(err)
err = wait.Poll(poll, 180*time.Second, func() (bool, error) { err = wait.Poll(framework.Poll, 180*time.Second, func() (bool, error) {
p, err := podClient.Get(p.Name) p, err := podClient.Get(p.Name)
if err != nil { if err != nil {
return false, err return false, err
@ -96,13 +97,13 @@ var _ = KubeDescribe("Probing container", func() {
return api.IsPodReady(p), nil return api.IsPodReady(p), nil
}) })
if err != wait.ErrWaitTimeout { if err != wait.ErrWaitTimeout {
Failf("expecting wait timeout error but got: %v", err) framework.Failf("expecting wait timeout error but got: %v", err)
} }
p, err = podClient.Get(p.Name) p, err = podClient.Get(p.Name)
expectNoError(err) framework.ExpectNoError(err)
isReady, err := podRunningReady(p) isReady, err := framework.PodRunningReady(p)
Expect(isReady).NotTo(BeTrue(), "pod should be not ready") Expect(isReady).NotTo(BeTrue(), "pod should be not ready")
restartCount := getRestartCount(p) restartCount := getRestartCount(p)
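The timing helpers used above (getTransitionTimeForReadyCondition, getContainerStartedTime, getRestartCount) are outside the hunks; the first one presumably walks the pod's conditions, along these lines (a sketch, not the file's actual helper):

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
)

// readyTransitionTime returns when the pod's Ready condition last changed,
// which is what the readiness-delay check above compares against the
// container start time.
func readyTransitionTime(p *api.Pod) (time.Time, error) {
	for _, c := range p.Status.Conditions {
		if c.Type == api.PodReady {
			return c.LastTransitionTime.Time, nil
		}
	}
	return time.Time{}, fmt.Errorf("pod %q has no Ready condition", p.Name)
}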


@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -55,10 +56,10 @@ const (
) )
// nodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name, // nodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
// eg: the name returned by getMasterHost(). This is also not guaranteed to work across // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh. // cloud providers since it involves ssh.
func nodeExec(nodeName, cmd string) (SSHResult, error) { func nodeExec(nodeName, cmd string) (framework.SSHResult, error) {
result, err := SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), testContext.Provider) result, err := framework.SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
return result, err return result, err
} }
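framework.SSH returns an SSHResult whose Code, Stdout and Stderr fields the restart tests inspect below. A small usage sketch built on the nodeExec wrapper above; the helper name and error handling here are illustrative:

package e2e

import (
	"fmt"
	"strconv"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// nodeHealthz shells out to curl on the node and inspects the SSHResult
// fields (Code, Stdout, Stderr) that the waitUp logic relies on.
func nodeHealthz(nodeName string, port int) (bool, error) {
	cmd := fmt.Sprintf("curl -s -o /dev/null -w '%%{http_code}' http://localhost:%d/healthz", port)
	result, err := nodeExec(nodeName, cmd)
	if err != nil {
		return false, err
	}
	if result.Code != 0 {
		framework.Logf("%q exited %d: stdout %q, stderr %q", cmd, result.Code, result.Stdout, result.Stderr)
		return false, nil
	}
	httpCode, convErr := strconv.Atoi(strings.TrimSpace(result.Stdout))
	return convErr == nil && httpCode == 200, nil
}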
@ -75,8 +76,8 @@ type restartDaemonConfig struct {
// NewRestartConfig creates a restartDaemonConfig for the given node and daemon. // NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig { func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
if !providerIs("gce") { if !framework.ProviderIs("gce") {
Logf("WARNING: SSH through the restart config might not work on %s", testContext.Provider) framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
} }
return &restartDaemonConfig{ return &restartDaemonConfig{
nodeName: nodeName, nodeName: nodeName,
@ -93,31 +94,31 @@ func (r *restartDaemonConfig) String() string {
// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
func (r *restartDaemonConfig) waitUp() { func (r *restartDaemonConfig) waitUp() {
Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r) framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
healthzCheck := fmt.Sprintf( healthzCheck := fmt.Sprintf(
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort) "curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) { err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
result, err := nodeExec(r.nodeName, healthzCheck) result, err := nodeExec(r.nodeName, healthzCheck)
expectNoError(err) framework.ExpectNoError(err)
if result.Code == 0 { if result.Code == 0 {
httpCode, err := strconv.Atoi(result.Stdout) httpCode, err := strconv.Atoi(result.Stdout)
if err != nil { if err != nil {
Logf("Unable to parse healthz http return code: %v", err) framework.Logf("Unable to parse healthz http return code: %v", err)
} else if httpCode == 200 { } else if httpCode == 200 {
return true, nil return true, nil
} }
} }
Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v", framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr) r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
return false, nil return false, nil
}) })
expectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout) framework.ExpectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout)
} }
// kill sends a SIGTERM to the daemon // kill sends a SIGTERM to the daemon
func (r *restartDaemonConfig) kill() { func (r *restartDaemonConfig) kill() {
Logf("Killing %v", r) framework.Logf("Killing %v", r)
nodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)) nodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
} }
@ -163,7 +164,7 @@ func replacePods(pods []*api.Pod, store cache.Store) {
for i := range pods { for i := range pods {
found = append(found, pods[i]) found = append(found, pods[i])
} }
expectNoError(store.Replace(found, "0")) framework.ExpectNoError(store.Replace(found, "0"))
} }
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
@ -171,26 +172,26 @@ func replacePods(pods []*api.Pod, store cache.Store) {
func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) { func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) {
options := api.ListOptions{LabelSelector: labelSelector} options := api.ListOptions{LabelSelector: labelSelector}
pods, err := c.Pods(ns).List(options) pods, err := c.Pods(ns).List(options)
expectNoError(err) framework.ExpectNoError(err)
failedContainers := 0 failedContainers := 0
containerRestartNodes := sets.NewString() containerRestartNodes := sets.NewString()
for _, p := range pods.Items { for _, p := range pods.Items {
for _, v := range FailedContainers(&p) { for _, v := range framework.FailedContainers(&p) {
failedContainers = failedContainers + v.restarts failedContainers = failedContainers + v.Restarts
containerRestartNodes.Insert(p.Spec.NodeName) containerRestartNodes.Insert(p.Spec.NodeName)
} }
} }
return failedContainers, containerRestartNodes.List() return failedContainers, containerRestartNodes.List()
} }
var _ = KubeDescribe("DaemonRestart [Disruptive]", func() { var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
framework := NewDefaultFramework("daemonrestart") f := framework.NewDefaultFramework("daemonrestart")
rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID()) rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector() labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc) existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
var ns string var ns string
var config RCConfig var config framework.RCConfig
var controller *controllerframework.Controller var controller *controllerframework.Controller
var newPods cache.Store var newPods cache.Store
var stopCh chan struct{} var stopCh chan struct{}
@ -199,20 +200,20 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
BeforeEach(func() { BeforeEach(func() {
// These tests require SSH // These tests require SSH
// TODO(11834): Enable this test in GKE once experimental API there is switched on // TODO(11834): Enable this test in GKE once experimental API there is switched on
SkipUnlessProviderIs("gce", "aws") framework.SkipUnlessProviderIs("gce", "aws")
ns = framework.Namespace.Name ns = f.Namespace.Name
// All the restart tests need an rc and a watch on pods of the rc. // All the restart tests need an rc and a watch on pods of the rc.
// Additionally some of them might scale the rc during the test. // Additionally some of them might scale the rc during the test.
config = RCConfig{ config = framework.RCConfig{
Client: framework.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: ns, Namespace: ns,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Replicas: numPods, Replicas: numPods,
CreatedPods: &[]*api.Pod{}, CreatedPods: &[]*api.Pod{},
} }
Expect(RunRC(config)).NotTo(HaveOccurred()) Expect(framework.RunRC(config)).NotTo(HaveOccurred())
replacePods(*config.CreatedPods, existingPods) replacePods(*config.CreatedPods, existingPods)
stopCh = make(chan struct{}) stopCh = make(chan struct{})
@ -221,11 +222,11 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return framework.Client.Pods(ns).List(options) return f.Client.Pods(ns).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
return framework.Client.Pods(ns).Watch(options) return f.Client.Pods(ns).Watch(options)
}, },
}, },
&api.Pod{}, &api.Pod{},
@ -252,7 +253,7 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
It("Controller Manager should not create/delete replicas across restart", func() { It("Controller Manager should not create/delete replicas across restart", func() {
restarter := NewRestartConfig( restarter := NewRestartConfig(
getMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout) framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
restarter.restart() restarter.restart()
// The intent is to ensure the replication controller manager has observed and reported status of // The intent is to ensure the replication controller manager has observed and reported status of
@ -260,7 +261,7 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number // to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status. // and awaits it to be observed and reported back in the RC's status.
ScaleRC(framework.Client, ns, rcName, numPods, true) framework.ScaleRC(f.Client, ns, rcName, numPods, true)
// Only check the keys, the pods can be different if the kubelet updated it. // Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really? // TODO: Can it really?
@ -274,14 +275,14 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
} }
if len(newKeys.List()) != len(existingKeys.List()) || if len(newKeys.List()) != len(existingKeys.List()) ||
!newKeys.IsSuperset(existingKeys) { !newKeys.IsSuperset(existingKeys) {
Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
} }
}) })
It("Scheduler should continue assigning pods to nodes across restart", func() { It("Scheduler should continue assigning pods to nodes across restart", func() {
restarter := NewRestartConfig( restarter := NewRestartConfig(
getMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout) framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
// Create pods while the scheduler is down and make sure the scheduler picks them up by // Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size. // scaling the rc to the same size.
@ -289,28 +290,28 @@ var _ = KubeDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill() restarter.kill()
// This is best effort to try and create pods while the scheduler is down, // This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal. // since we don't know exactly when it is restarted after the kill signal.
expectNoError(ScaleRC(framework.Client, ns, rcName, numPods+5, false)) framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, false))
restarter.waitUp() restarter.waitUp()
expectNoError(ScaleRC(framework.Client, ns, rcName, numPods+5, true)) framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, true))
}) })
It("Kubelet should not restart containers across restart", func() { It("Kubelet should not restart containers across restart", func() {
nodeIPs, err := getNodePublicIps(framework.Client) nodeIPs, err := getNodePublicIps(f.Client)
expectNoError(err) framework.ExpectNoError(err)
preRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector) preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
if preRestarts != 0 { if preRestarts != 0 {
Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
} }
for _, ip := range nodeIPs { for _, ip := range nodeIPs {
restarter := NewRestartConfig( restarter := NewRestartConfig(
ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout) ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
restarter.restart() restarter.restart()
} }
postRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector) postRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
if postRestarts != preRestarts { if postRestarts != preRestarts {
dumpNodeDebugInfo(framework.Client, badNodes) framework.DumpNodeDebugInfo(f.Client, badNodes)
Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
} }
}) })
}) })
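The informer construction that fills newPods and controller is elided between the ListWatch in BeforeEach and the next hunk; it typically looks like the sketch below. The handler wiring and the record callback are placeholders standing in for the test's tracker, not code from this commit:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	controllerframework "k8s.io/kubernetes/pkg/controller/framework"
)

// newPodWatch hands the ListWatch to NewInformer together with event handlers
// that record pod adds and deletes, returning the pod store and the controller
// that is later run with the stop channel.
func newPodWatch(lw *cache.ListWatch, record func(event string, obj interface{})) (cache.Store, *controllerframework.Controller) {
	return controllerframework.NewInformer(
		lw,
		&api.Pod{},
		0, // no resync
		controllerframework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { record("ADD", obj) },
			DeleteFunc: func(obj interface{}) { record("DELETE", obj) },
		},
	)
}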


@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -53,25 +54,25 @@ const (
// happen. In the future, running in parallel may work if we have an eviction // happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room. // model which lets the DS controller kick out other pods to make room.
// See http://issues.k8s.io/21767 for more details // See http://issues.k8s.io/21767 for more details
var _ = KubeDescribe("Daemon set [Serial]", func() { var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
var f *Framework var f *framework.Framework
AfterEach(func() { AfterEach(func() {
if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil { if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets)) framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
} else { } else {
Logf("unable to dump daemonsets: %v", err) framework.Logf("unable to dump daemonsets: %v", err)
} }
if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil { if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods)) framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
} else { } else {
Logf("unable to dump pods: %v", err) framework.Logf("unable to dump pods: %v", err)
} }
err := clearDaemonSetNodeLabels(f.Client) err := clearDaemonSetNodeLabels(f.Client)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
f = NewDefaultFramework("daemonsets") f = framework.NewDefaultFramework("daemonsets")
image := "gcr.io/google_containers/serve_hostname:v1.4" image := "gcr.io/google_containers/serve_hostname:v1.4"
dsName := "daemon-set" dsName := "daemon-set"
@ -89,7 +90,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() {
It("should run and stop simple daemon", func() { It("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
Logf("Creating simple daemon set %s", dsName) framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: dsName, Name: dsName,
@ -113,7 +114,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() {
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
Logf("Check that reaper kills all daemon pods for %s", dsName) framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c) dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(ns, dsName, 0, nil) err = dsReaper.Stop(ns, dsName, 0, nil)
@ -146,7 +147,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() {
It("should run and stop complex daemon", func() { It("should run and stop complex daemon", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName} complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"} nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
Logf("Creating daemon with a node selector %s", dsName) framework.Logf("Creating daemon with a node selector %s", dsName)
_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: dsName, Name: dsName,
@ -177,7 +178,7 @@ var _ = KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.") By("Change label of node, check that daemon pod is launched.")
nodeList := ListSchedulableNodesOrDie(f.Client) nodeList := framework.ListSchedulableNodesOrDie(f.Client)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node") Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@ -212,7 +213,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
} }
func clearDaemonSetNodeLabels(c *client.Client) error { func clearDaemonSetNodeLabels(c *client.Client) error {
nodeList := ListSchedulableNodesOrDie(c) nodeList := framework.ListSchedulableNodesOrDie(c)
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil { if err != nil {
@ -248,7 +249,7 @@ func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string
return true, err return true, err
} }
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict { if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
Logf("failed to update node due to resource version conflict") framework.Logf("failed to update node due to resource version conflict")
return false, nil return false, nil
} }
return false, err return false, err
@ -262,7 +263,7 @@ func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string
return newNode, nil return newNode, nil
} }
func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames []string) func() (bool, error) { func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
selector := labels.Set(selector).AsSelector() selector := labels.Set(selector).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
@ -276,7 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [
for _, pod := range pods { for _, pod := range pods {
nodesToPodCount[pod.Spec.NodeName] += 1 nodesToPodCount[pod.Spec.NodeName] += 1
} }
Logf("nodesToPodCount: %#v", nodesToPodCount) framework.Logf("nodesToPodCount: %#v", nodesToPodCount)
// Ensure that exactly 1 pod is running on all nodes in nodeNames. // Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames { for _, nodeName := range nodeNames {
@ -292,10 +293,10 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [
} }
} }
func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) { func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
nodeList, err := f.Client.Nodes().List(api.ListOptions{}) nodeList, err := f.Client.Nodes().List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
nodeNames := make([]string, 0) nodeNames := make([]string, 0)
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name) nodeNames = append(nodeNames, node.Name)
@ -304,6 +305,6 @@ func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bo
} }
} }
func checkRunningOnNoNodes(f *Framework, selector map[string]string) func() (bool, error) { func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return checkDaemonPodOnNodes(f, selector, make([]string, 0)) return checkDaemonPodOnNodes(f, selector, make([]string, 0))
} }
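The daemon-set hunks above all follow the same mechanical pattern of this refactoring: the Framework type and the shared helpers (Logf, ListSchedulableNodesOrDie, ExpectNoError) moved into test/e2e/framework and became exported, so every call site now package-qualifies them. A minimal sketch of that call-site shape, with a made-up helper name standing in for the real test code:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// logSchedulableNodes is a hypothetical helper showing the shape the hunks above
// converge on: the Framework type and the shared utilities are exported from
// test/e2e/framework and always reached through the package name.
func logSchedulableNodes(f *framework.Framework) {
	nodes := framework.ListSchedulableNodesOrDie(f.Client)
	framework.Logf("found %d schedulable nodes", len(nodes.Items))
}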
View File
@ -23,12 +23,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Kubernetes Dashboard", func() { var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
const ( const (
uiServiceName = "kubernetes-dashboard" uiServiceName = "kubernetes-dashboard"
uiAppName = uiServiceName uiAppName = uiServiceName
@ -37,36 +38,36 @@ var _ = KubeDescribe("Kubernetes Dashboard", func() {
serverStartTimeout = 1 * time.Minute serverStartTimeout = 1 * time.Minute
) )
f := NewDefaultFramework(uiServiceName) f := framework.NewDefaultFramework(uiServiceName)
It("should check that the kubernetes-dashboard instance is alive", func() { It("should check that the kubernetes-dashboard instance is alive", func() {
By("Checking whether the kubernetes-dashboard service exists.") By("Checking whether the kubernetes-dashboard service exists.")
err := waitForService(f.Client, uiNamespace, uiServiceName, true, poll, serviceStartTimeout) err := framework.WaitForService(f.Client, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Checking to make sure the kubernetes-dashboard pods are running") By("Checking to make sure the kubernetes-dashboard pods are running")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
err = waitForPodsWithLabelRunning(f.Client, uiNamespace, selector) err = framework.WaitForPodsWithLabelRunning(f.Client, uiNamespace, selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Checking to make sure we get a response from the kubernetes-dashboard.") By("Checking to make sure we get a response from the kubernetes-dashboard.")
err = wait.Poll(poll, serverStartTimeout, func() (bool, error) { err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
var status int var status int
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
Logf("Get services proxy request failed: %v", errProxy) framework.Logf("Get services proxy request failed: %v", errProxy)
} }
// Query against the proxy URL for the kube-ui service. // Query against the proxy URL for the kube-ui service.
err := proxyRequest.Namespace(uiNamespace). err := proxyRequest.Namespace(uiNamespace).
Name(uiServiceName). Name(uiServiceName).
Timeout(singleCallTimeout). Timeout(framework.SingleCallTimeout).
Do(). Do().
StatusCode(&status). StatusCode(&status).
Error() Error()
if status != http.StatusOK { if status != http.StatusOK {
Logf("Unexpected status from kubernetes-dashboard: %v", status) framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
} else if err != nil { } else if err != nil {
Logf("Request to kube-ui failed: %v", err) framework.Logf("Request to kube-ui failed: %v", err)
} }
// Don't return err here as it aborts polling. // Don't return err here as it aborts polling.
return status == http.StatusOK, nil return status == http.StatusOK, nil
@ -77,7 +78,7 @@ var _ = KubeDescribe("Kubernetes Dashboard", func() {
var status int var status int
err = f.Client.Get(). err = f.Client.Get().
AbsPath("/ui"). AbsPath("/ui").
Timeout(singleCallTimeout). Timeout(framework.SingleCallTimeout).
Do(). Do().
StatusCode(&status). StatusCode(&status).
Error() Error()
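The dashboard test shows the same move applied to exported constants and the service-proxy plumbing: poll, serviceStartTimeout and singleCallTimeout become framework.Poll, framework.ServiceStartTimeout and framework.SingleCallTimeout, and waitForService, waitForPodsWithLabelRunning and getServicesProxyRequest gain the framework prefix. A condensed sketch of the polling loop under those names; the function and its parameters are illustrative, not part of the commit:

package e2e

import (
	"net/http"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// pollDashboard is a hypothetical condensation of the loop above: every timeout
// and the services-proxy helper now come from the framework package.
func pollDashboard(f *framework.Framework, uiNamespace, uiServiceName string, timeout time.Duration) error {
	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
		var status int
		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
		if errProxy != nil {
			framework.Logf("Get services proxy request failed: %v", errProxy)
			return false, nil // keep polling; a transient proxy error is not fatal
		}
		err := proxyRequest.Namespace(uiNamespace).
			Name(uiServiceName).
			Timeout(framework.SingleCallTimeout).
			Do().
			StatusCode(&status).
			Error()
		if err != nil {
			framework.Logf("Request to the dashboard failed: %v", err)
		}
		// Don't return err here, as that would abort polling.
		return status == http.StatusOK, nil
	})
}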
View File
@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -49,36 +50,36 @@ const (
// Maximum container failures this test tolerates before failing. // Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0 var MaxContainerFailures = 0
func density30AddonResourceVerifier() map[string]resourceConstraint { func density30AddonResourceVerifier() map[string]framework.ResourceConstraint {
constraints := make(map[string]resourceConstraint) constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = resourceConstraint{ constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
cpuConstraint: 0.1, CPUConstraint: 0.1,
memoryConstraint: 250 * (1024 * 1024), MemoryConstraint: 250 * (1024 * 1024),
} }
constraints["elasticsearch-logging"] = resourceConstraint{ constraints["elasticsearch-logging"] = framework.ResourceConstraint{
cpuConstraint: 2, CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164 // TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
memoryConstraint: 5000 * (1024 * 1024), MemoryConstraint: 5000 * (1024 * 1024),
} }
constraints["heapster"] = resourceConstraint{ constraints["heapster"] = framework.ResourceConstraint{
cpuConstraint: 2, CPUConstraint: 2,
memoryConstraint: 1800 * (1024 * 1024), MemoryConstraint: 1800 * (1024 * 1024),
} }
constraints["kibana-logging"] = resourceConstraint{ constraints["kibana-logging"] = framework.ResourceConstraint{
cpuConstraint: 0.2, CPUConstraint: 0.2,
memoryConstraint: 100 * (1024 * 1024), MemoryConstraint: 100 * (1024 * 1024),
} }
constraints["kube-proxy"] = resourceConstraint{ constraints["kube-proxy"] = framework.ResourceConstraint{
cpuConstraint: 0.05, CPUConstraint: 0.05,
memoryConstraint: 20 * (1024 * 1024), MemoryConstraint: 20 * (1024 * 1024),
} }
constraints["l7-lb-controller"] = resourceConstraint{ constraints["l7-lb-controller"] = framework.ResourceConstraint{
cpuConstraint: 0.05, CPUConstraint: 0.05,
memoryConstraint: 20 * (1024 * 1024), MemoryConstraint: 20 * (1024 * 1024),
} }
constraints["influxdb"] = resourceConstraint{ constraints["influxdb"] = framework.ResourceConstraint{
cpuConstraint: 2, CPUConstraint: 2,
memoryConstraint: 500 * (1024 * 1024), MemoryConstraint: 500 * (1024 * 1024),
} }
return constraints return constraints
} }
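This hunk is purely a visibility change: resourceConstraint and its cpuConstraint/memoryConstraint fields become the exported framework.ResourceConstraint with CPUConstraint and MemoryConstraint. A short sketch of building a verifier map against the exported type; the addon name and limits here are illustrative, not taken from the table above:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// exampleResourceVerifier mirrors density30AddonResourceVerifier with a single,
// made-up entry to show the exported field names.
func exampleResourceVerifier() map[string]framework.ResourceConstraint {
	constraints := make(map[string]framework.ResourceConstraint)
	constraints["example-addon"] = framework.ResourceConstraint{
		CPUConstraint:    0.1,               // cores
		MemoryConstraint: 100 * 1024 * 1024, // bytes
	}
	return constraints
}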
@ -90,7 +91,7 @@ func density30AddonResourceVerifier() map[string]resourceConstraint {
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones // IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting // results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup. // limits on Docker's concurrent container startup.
var _ = KubeDescribe("Density", func() { var _ = framework.KubeDescribe("Density", func() {
var c *client.Client var c *client.Client
var nodeCount int var nodeCount int
var RCName string var RCName string
@ -109,35 +110,35 @@ var _ = KubeDescribe("Density", func() {
saturationThreshold = MinSaturationThreshold saturationThreshold = MinSaturationThreshold
} }
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold)) Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := SaturationTime{ saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime, TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount, NumberOfNodes: nodeCount,
NumberOfPods: totalPods, NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second), Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
} }
Logf("Cluster saturation time: %s", prettyPrintJSON(saturationData)) framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
// Verify latency metrics. // Verify latency metrics.
highLatencyRequests, err := HighLatencyRequests(c) highLatencyRequests, err := framework.HighLatencyRequests(c)
expectNoError(err) framework.ExpectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests") Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Verify scheduler metrics. // Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test. // TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver. // We should do something similar to how we do it for APIserver.
expectNoError(VerifySchedulerLatency(c)) framework.ExpectNoError(framework.VerifySchedulerLatency(c))
}) })
// Explicitly put here, to delete namespace at the end of the test // Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.). // (after measuring latency metrics, etc.).
framework := NewDefaultFramework("density") f := framework.NewDefaultFramework("density")
framework.NamespaceDeletionTimeout = time.Hour f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
nodes := ListSchedulableNodesOrDie(c) nodes := framework.ListSchedulableNodesOrDie(c)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
@ -147,15 +148,15 @@ var _ = KubeDescribe("Density", func() {
// Terminating a namespace (deleting the remaining objects from it - which // Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all // generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test. // terminating namespace to be finally deleted before starting this test.
err := checkTestingNSDeletedExcept(c, ns) err := framework.CheckTestingNSDeletedExcept(c, ns)
expectNoError(err) framework.ExpectNoError(err)
uuid = string(util.NewUUID()) uuid = string(util.NewUUID())
expectNoError(resetMetrics(c)) framework.ExpectNoError(framework.ResetMetrics(c))
expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777)) framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
Logf("Listing nodes for easy debugging:\n") framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items { for _, node := range nodes.Items {
var internalIP, externalIP string var internalIP, externalIP string
for _, address := range node.Status.Addresses { for _, address := range node.Status.Addresses {
@ -166,7 +167,7 @@ var _ = KubeDescribe("Density", func() {
externalIP = address.Address externalIP = address.Address
} }
} }
Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP) framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
} }
}) })
@ -192,7 +193,7 @@ var _ = KubeDescribe("Density", func() {
switch testArg.podsPerNode { switch testArg.podsPerNode {
case 30: case 30:
name = "[Feature:Performance] " + name name = "[Feature:Performance] " + name
framework.addonResourceConstraints = density30AddonResourceVerifier() f.AddonResourceConstraints = density30AddonResourceVerifier()
case 95: case 95:
name = "[Feature:HighDensityPerformance]" + name name = "[Feature:HighDensityPerformance]" + name
default: default:
@ -203,10 +204,10 @@ var _ = KubeDescribe("Density", func() {
podsPerNode := itArg.podsPerNode podsPerNode := itArg.podsPerNode
totalPods = podsPerNode * nodeCount totalPods = podsPerNode * nodeCount
RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid
fileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+"/%s/pod_states.csv", uuid)) fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
expectNoError(err) framework.ExpectNoError(err)
defer fileHndl.Close() defer fileHndl.Close()
config := RCConfig{Client: c, config := framework.RCConfig{Client: c,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Name: RCName, Name: RCName,
Namespace: ns, Namespace: ns,
@ -274,10 +275,10 @@ var _ = KubeDescribe("Density", func() {
// Start the replication controller. // Start the replication controller.
startTime := time.Now() startTime := time.Now()
expectNoError(RunRC(config)) framework.ExpectNoError(framework.RunRC(config))
e2eStartupTime = time.Now().Sub(startTime) e2eStartupTime = time.Now().Sub(startTime)
Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime) framework.Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)
Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(totalPods)/float32(e2eStartupTime/time.Second)) framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(totalPods)/float32(e2eStartupTime/time.Second))
By("Waiting for all events to be recorded") By("Waiting for all events to be recorded")
last := -1 last := -1
@ -302,21 +303,21 @@ var _ = KubeDescribe("Density", func() {
close(stop) close(stop)
if current != last { if current != last {
Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes()) framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
} }
Logf("Found %d events", current) framework.Logf("Found %d events", current)
if currentCount != lastCount { if currentCount != lastCount {
Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes()) framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes())
} }
Logf("Found %d updates", currentCount) framework.Logf("Found %d updates", currentCount)
// Tune the threshold for allowed failures. // Tune the threshold for allowed failures.
badEvents := BadEvents(events) badEvents := framework.BadEvents(events)
Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods))))) Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))
// Print some data about Pod to Node allocation // Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data") By("Printing Pod to Node allocation data")
podList, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) podList, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int) pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string) systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items { for _, pod := range podList.Items {
@ -332,7 +333,7 @@ var _ = KubeDescribe("Density", func() {
} }
sort.Strings(nodeNames) sort.Strings(nodeNames)
for _, node := range nodeNames { for _, node := range nodeNames {
Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node]) framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
} }
if itArg.runLatencyTest { if itArg.runLatencyTest {
@ -366,7 +367,7 @@ var _ = KubeDescribe("Density", func() {
if startTime != unversioned.NewTime(time.Time{}) { if startTime != unversioned.NewTime(time.Time{}) {
runTimes[p.Name] = startTime runTimes[p.Name] = startTime
} else { } else {
Failf("Pod %v is reported to be running, but none of its containers is", p.Name) framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
} }
} }
} }
@ -428,7 +429,7 @@ var _ = KubeDescribe("Density", func() {
By("Waiting for all Pods begin observed by the watch...") By("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) { for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) > timeout { if time.Since(start) > timeout {
Failf("Timeout reached waiting for all Pods being observed by the watch.") framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
} }
} }
close(stopCh) close(stopCh)
@ -440,7 +441,7 @@ var _ = KubeDescribe("Density", func() {
} }
for node, count := range nodeToLatencyPods { for node, count := range nodeToLatencyPods {
if count > 1 { if count > 1 {
Logf("%d latency pods scheduled on %s", count, node) framework.Logf("%d latency pods scheduled on %s", count, node)
} }
} }
@ -451,7 +452,7 @@ var _ = KubeDescribe("Density", func() {
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options) schedEvents, err := c.Events(ns).List(options)
expectNoError(err) framework.ExpectNoError(err)
for k := range createTimes { for k := range createTimes {
for _, event := range schedEvents.Items { for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k { if event.InvolvedObject.Name == k {
@ -461,11 +462,11 @@ var _ = KubeDescribe("Density", func() {
} }
} }
scheduleLag := make([]podLatencyData, 0) scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]podLatencyData, 0) startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]podLatencyData, 0) watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]podLatencyData, 0) schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]podLatencyData, 0) e2eLag := make([]framework.PodLatencyData, 0)
for name, create := range createTimes { for name, create := range createTimes {
sched, ok := scheduleTimes[name] sched, ok := scheduleTimes[name]
@ -477,30 +478,30 @@ var _ = KubeDescribe("Density", func() {
node, ok := nodes[name] node, ok := nodes[name]
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
scheduleLag = append(scheduleLag, podLatencyData{name, node, sched.Time.Sub(create.Time)}) scheduleLag = append(scheduleLag, framework.PodLatencyData{name, node, sched.Time.Sub(create.Time)})
startupLag = append(startupLag, podLatencyData{name, node, run.Time.Sub(sched.Time)}) startupLag = append(startupLag, framework.PodLatencyData{name, node, run.Time.Sub(sched.Time)})
watchLag = append(watchLag, podLatencyData{name, node, watch.Time.Sub(run.Time)}) watchLag = append(watchLag, framework.PodLatencyData{name, node, watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, podLatencyData{name, node, watch.Time.Sub(sched.Time)}) schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{name, node, watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, podLatencyData{name, node, watch.Time.Sub(create.Time)}) e2eLag = append(e2eLag, framework.PodLatencyData{name, node, watch.Time.Sub(create.Time)})
} }
sort.Sort(latencySlice(scheduleLag)) sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(latencySlice(startupLag)) sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(latencySlice(watchLag)) sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(latencySlice(schedToWatchLag)) sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(latencySlice(e2eLag)) sort.Sort(framework.LatencySlice(e2eLag))
printLatencies(scheduleLag, "worst schedule latencies") framework.PrintLatencies(scheduleLag, "worst schedule latencies")
printLatencies(startupLag, "worst run-after-schedule latencies") framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
printLatencies(watchLag, "worst watch latencies") framework.PrintLatencies(watchLag, "worst watch latencies")
printLatencies(schedToWatchLag, "worst scheduled-to-end total latencies") framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
printLatencies(e2eLag, "worst e2e total latencies") framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable. // Test whether e2e pod startup time is acceptable.
podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLag)} podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
expectNoError(VerifyPodStartupLatency(podStartupLatency)) framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
logSuspiciousLatency(startupLag, e2eLag, nodeCount, c) framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
} }
By("Deleting ReplicationController") By("Deleting ReplicationController")
@ -508,8 +509,8 @@ var _ = KubeDescribe("Density", func() {
rc, err := c.ReplicationControllers(ns).Get(RCName) rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 { if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller") By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName) err := framework.DeleteRC(c, ns, RCName)
expectNoError(err) framework.ExpectNoError(err)
} }
By("Removing additional replication controllers if any") By("Removing additional replication controllers if any")
@ -559,7 +560,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, imag
}, },
} }
_, err := c.ReplicationControllers(ns).Create(rc) _, err := c.ReplicationControllers(ns).Create(rc)
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForRCPodsRunning(c, ns, name)) framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
Logf("Found pod '%s' running", name) framework.Logf("Found pod '%s' running", name)
} }
View File
@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -46,8 +47,8 @@ const (
redisImageName = "redis" redisImageName = "redis"
) )
var _ = KubeDescribe("Deployment", func() { var _ = framework.KubeDescribe("Deployment", func() {
f := NewDefaultFramework("deployment") f := framework.NewDefaultFramework("deployment")
It("deployment should create new pods", func() { It("deployment should create new pods", func() {
testNewDeployment(f) testNewDeployment(f)
@ -174,25 +175,25 @@ func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymen
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName) framework.Logf("deleting deployment %s", deploymentName)
reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC) reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0)) err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("ensuring deployment %s was deleted", deploymentName) framework.Logf("ensuring deployment %s was deleted", deploymentName)
_, err = c.Extensions().Deployments(ns).Get(deployment.Name) _, err = c.Extensions().Deployments(ns).Get(deployment.Name)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue()) Expect(errors.IsNotFound(err)).To(BeTrue())
Logf("ensuring deployment %s RSes were deleted", deploymentName) framework.Logf("ensuring deployment %s RSes were deleted", deploymentName)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
rss, err := c.Extensions().ReplicaSets(ns).List(options) rss, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(rss.Items).Should(HaveLen(0)) Expect(rss.Items).Should(HaveLen(0))
Logf("ensuring deployment %s pods were deleted", deploymentName) framework.Logf("ensuring deployment %s pods were deleted", deploymentName)
var pods *api.PodList var pods *api.PodList
if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
pods, err = c.Core().Pods(ns).List(api.ListOptions{}) pods, err = c.Core().Pods(ns).List(api.ListOptions{})
@ -204,11 +205,11 @@ func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymen
} }
return false, nil return false, nil
}); err != nil { }); err != nil {
Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods) framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
} }
} }
func testNewDeployment(f *Framework) { func testNewDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -217,7 +218,7 @@ func testNewDeployment(f *Framework) {
deploymentName := "test-new-deployment" deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": nginxImageName} podLabels := map[string]string{"name": nginxImageName}
replicas := 1 replicas := 1
Logf("Creating simple deployment %s", deploymentName) framework.Logf("Creating simple deployment %s", deploymentName)
d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
_, err := c.Extensions().Deployments(ns).Create(d) _, err := c.Extensions().Deployments(ns).Create(d)
@ -225,10 +226,10 @@ func testNewDeployment(f *Framework) {
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@ -242,7 +243,7 @@ func testNewDeployment(f *Framework) {
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set")) Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set"))
} }
func testRollingUpdateDeployment(f *Framework) { func testRollingUpdateDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -260,24 +261,24 @@ func testRollingUpdateDeployment(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod", false, 3) err = framework.VerifyPods(unversionedClient, ns, "sample-pod", false, 3)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Create a deployment to delete nginx pods and instead bring up redis pods. // Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment" deploymentName := "test-rolling-update-deployment"
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil)) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// There should be 1 old RS (nginx-controller, which is adopted) // There should be 1 old RS (nginx-controller, which is adopted)
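In the deployment hunks the same export step applies to the verification helpers: verifyPods, waitForDeploymentRevisionAndImage and waitForDeploymentStatus become framework.VerifyPods, framework.WaitForDeploymentRevisionAndImage and framework.WaitForDeploymentStatus. A small sketch of the pod-verification half, which only needs the unversioned client carried by the Framework; the wrapper name and parameters are illustrative:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// verifySamplePods is a hypothetical wrapper around the now-exported helper;
// the boolean argument is passed as false, exactly as the tests above do.
func verifySamplePods(f *framework.Framework, podName string, replicas int) {
	if err := framework.VerifyPods(f.Client, f.Namespace.Name, podName, false, replicas); err != nil {
		framework.Logf("error in waiting for pods to come up: %s", err)
	}
}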
@ -292,7 +293,7 @@ func testRollingUpdateDeployment(f *Framework) {
Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0)) Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
} }
func testRollingUpdateDeploymentEvents(f *Framework) { func testRollingUpdateDeploymentEvents(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -316,32 +317,32 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(rs) _, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1) err = framework.VerifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Create a deployment to delete nginx pods and instead bring up redis pods. // Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-scale-deployment" deploymentName := "test-rolling-scale-deployment"
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil)) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 3546343826724305833 // Wait for it to be updated to revision 3546343826724305833
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected. We use events to verify that. // Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForEvents(unversionedClient, ns, deployment, 2) framework.WaitForEvents(unversionedClient, ns, deployment, 2)
events, err := c.Core().Events(ns).Search(deployment) events, err := c.Core().Events(ns).Search(deployment)
if err != nil { if err != nil {
Logf("error in listing events: %s", err) framework.Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down // There should be 2 events, one to scale up the new ReplicaSet and then to scale down
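The events-based verification keeps its shape; only the helper names gain the framework prefix (WaitForEvents here, WaitForPartialEvents further down). A short sketch of that flow, calling WaitForEvents with the same arguments as the test and leaving the event assertions to the caller; the wrapper name is made up:

package e2e

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/test/e2e/framework"
)

// awaitScalingEvents is a hypothetical wrapper: wait until the deployment has
// recorded the expected number of events before inspecting them.
func awaitScalingEvents(f *framework.Framework, deployment *extensions.Deployment, count int) {
	framework.Logf("waiting for %d events on deployment %s", count, deployment.Name)
	framework.WaitForEvents(f.Client, f.Namespace.Name, deployment, count)
}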
@ -354,7 +355,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName))) Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
} }
func testRecreateDeployment(f *Framework) { func testRecreateDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -372,33 +373,33 @@ func testRecreateDeployment(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3) err = framework.VerifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Create a deployment to delete nginx pods and instead bring up redis pods. // Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-recreate-deployment" deploymentName := "test-recreate-deployment"
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil)) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected. We use events to verify that. // Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForEvents(unversionedClient, ns, deployment, 2) framework.WaitForEvents(unversionedClient, ns, deployment, 2)
events, err := c.Core().Events(ns).Search(deployment) events, err := c.Core().Events(ns).Search(deployment)
if err != nil { if err != nil {
Logf("error in listing events: %s", err) framework.Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet. // There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet.
@ -411,7 +412,7 @@ func testRecreateDeployment(f *Framework) {
} }
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *Framework) { func testDeploymentCleanUpPolicy(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
unversionedClient := f.Client unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient) c := adapter.FromUnversionedClient(unversionedClient)
@ -428,15 +429,15 @@ func testDeploymentCleanUpPolicy(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1) err = framework.VerifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Create a deployment to delete nginx pods and instead bring up redis pods. // Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-cleanup-deployment" deploymentName := "test-cleanup-deployment"
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil { if err != nil {
@ -459,14 +460,14 @@ func testDeploymentCleanUpPolicy(f *Framework) {
} }
numPodCreation-- numPodCreation--
if numPodCreation < 0 { if numPodCreation < 0 {
Failf("Expect only one pod creation, the second creation event: %#v\n", event) framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
} }
pod, ok := event.Object.(*api.Pod) pod, ok := event.Object.(*api.Pod)
if !ok { if !ok {
Fail("Expect event Object to be a pod") Fail("Expect event Object to be a pod")
} }
if pod.Spec.Containers[0].Name != redisImageName { if pod.Spec.Containers[0].Name != redisImageName {
Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod) framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod)
} }
case <-stopCh: case <-stopCh:
return return
@ -477,14 +478,14 @@ func testDeploymentCleanUpPolicy(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit) err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
close(stopCh) close(stopCh)
} }
// testRolloverDeployment tests that deployment supports rollover. // testRolloverDeployment tests that deployment supports rollover.
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes. // i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *Framework) { func testRolloverDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -502,14 +503,14 @@ func testRolloverDeployment(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage)) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, rsReplicas) err = framework.VerifyPods(unversionedClient, ns, podName, false, rsReplicas)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Wait for the required pods to be ready for at least minReadySeconds (be available) // Wait for the required pods to be ready for at least minReadySeconds (be available)
deploymentMinReadySeconds := 5 deploymentMinReadySeconds := 5
err = waitForPodsReady(c, ns, podName, deploymentMinReadySeconds) err = framework.WaitForPodsReady(c, ns, podName, deploymentMinReadySeconds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Create a deployment to delete nginx pods and instead bring up redis-slave pods. // Create a deployment to delete nginx pods and instead bring up redis-slave pods.
@ -517,7 +518,7 @@ func testRolloverDeployment(f *Framework) {
deploymentReplicas := 4 deploymentReplicas := 4
deploymentImage := "gcr.io/google_samples/gb-redisslave:v1" deploymentImage := "gcr.io/google_samples/gb-redisslave:v1"
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
newDeployment.Spec.MinReadySeconds = deploymentMinReadySeconds newDeployment.Spec.MinReadySeconds = deploymentMinReadySeconds
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
@ -532,7 +533,7 @@ func testRolloverDeployment(f *Framework) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Make sure the deployment starts to scale up and down replica sets // Make sure the deployment starts to scale up and down replica sets
waitForPartialEvents(unversionedClient, ns, deployment, 2) framework.WaitForPartialEvents(unversionedClient, ns, deployment, 2)
// Check if it's updated to revision 1 correctly // Check if it's updated to revision 1 correctly
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage) _, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
@ -540,25 +541,25 @@ func testRolloverDeployment(f *Framework) {
// If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it. // If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas)) Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage
deployment, err = updateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Use observedGeneration to determine if the controller noticed the pod template update. // Use observedGeneration to determine if the controller noticed the pod template update.
err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 2 // Wait for it to be updated to revision 2
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
func testPausedDeployment(f *Framework) { func testPausedDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client. // functions like verifyPod still expects a unversioned#Client.
@ -568,7 +569,7 @@ func testPausedDeployment(f *Framework) {
podLabels := map[string]string{"name": nginxImageName} podLabels := map[string]string{"name": nginxImageName}
d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
d.Spec.Paused = true d.Spec.Paused = true
Logf("Creating paused deployment %s", deploymentName) framework.Logf("Creating paused deployment %s", deploymentName)
_, err := c.Extensions().Deployments(ns).Create(d) _, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
@ -585,13 +586,13 @@ func testPausedDeployment(f *Framework) {
} }
// Update the deployment to run // Update the deployment to run
deployment, err = updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Paused = false update.Spec.Paused = false
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Use observedGeneration to determine if the controller noticed the resume. // Use observedGeneration to determine if the controller noticed the resume.
err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
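A recurring pattern in the paused-deployment and rollback hunks is the closure-based update: updateDeploymentWithRetries becomes framework.UpdateDeploymentWithRetries, followed by framework.WaitForObservedDeployment on deployment.Generation so the test only proceeds once the controller has observed the change. The mutation itself stays a plain function over *extensions.Deployment; a minimal sketch, with the name resumeMutation made up:

package e2e

import "k8s.io/kubernetes/pkg/apis/extensions"

// resumeMutation is the kind of closure these hunks hand to
// framework.UpdateDeploymentWithRetries: the helper fetches the Deployment and
// applies the closure to it, so only touch the fields you mean to change.
func resumeMutation(update *extensions.Deployment) {
	update.Spec.Paused = false
}

In the tests above it would be passed as the final argument, as in framework.UpdateDeploymentWithRetries(c, ns, deploymentName, resumeMutation).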
@ -612,18 +613,18 @@ func testPausedDeployment(f *Framework) {
// Pause the deployment and delete the replica set. // Pause the deployment and delete the replica set.
// The paused deployment shouldn't recreate a new one. // The paused deployment shouldn't recreate a new one.
deployment, err = updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Paused = true update.Spec.Paused = true
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Use observedGeneration to determine if the controller noticed the pause. // Use observedGeneration to determine if the controller noticed the pause.
err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(DeleteReplicaSet(unversionedClient, ns, newRS.Name)).NotTo(HaveOccurred()) Expect(framework.DeleteReplicaSet(unversionedClient, ns, newRS.Name)).NotTo(HaveOccurred())
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName) deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -643,7 +644,7 @@ func testPausedDeployment(f *Framework) {
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and // testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
// then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3), // then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3),
// and then rollback to last revision. // and then rollback to last revision.
func testRollbackDeployment(f *Framework) { func testRollbackDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
unversionedClient := f.Client unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient) c := adapter.FromUnversionedClient(unversionedClient)
@ -655,7 +656,7 @@ func testRollbackDeployment(f *Framework) {
deploymentReplicas := 1 deploymentReplicas := 1
deploymentImage := nginxImage deploymentImage := nginxImage
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
createAnnotation := map[string]string{"action": "create", "author": "minion"} createAnnotation := map[string]string{"action": "create", "author": "minion"}
d.Annotations = createAnnotation d.Annotations = createAnnotation
@ -664,21 +665,21 @@ func testRollbackDeployment(f *Framework) {
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "create" // Current newRS annotation should be "create"
err = checkNewRSAnnotations(c, ns, deploymentName, createAnnotation) err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// 2. Update the deployment to create redis pods. // 2. Update the deployment to create redis pods.
updatedDeploymentImage := redisImage updatedDeploymentImage := redisImage
updatedDeploymentImageName := redisImageName updatedDeploymentImageName := redisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"} updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation update.Annotations = updateAnnotation
@ -686,62 +687,62 @@ func testRollbackDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Use observedGeneration to determine if the controller noticed the pod template update. // Use observedGeneration to determine if the controller noticed the pod template update.
err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 2 // Wait for it to be updated to revision 2
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "update" // Current newRS annotation should be "update"
err = checkNewRSAnnotations(c, ns, deploymentName, updateAnnotation) err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// 3. Update the deploymentRollback to roll back to revision 1 // 3. Update the deploymentRollback to roll back to revision 1
revision := int64(1) revision := int64(1)
Logf("rolling back deployment %s to revision %d", deploymentName, revision) framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback := newDeploymentRollback(deploymentName, nil, revision) rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for the deployment to start rolling back // Wait for the deployment to start rolling back
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: report RollbackDone in deployment status and check it here // TODO: report RollbackDone in deployment status and check it here
// Wait for it to be updated to revision 3 // Wait for it to be updated to revision 3
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "create", after the rollback // Current newRS annotation should be "create", after the rollback
err = checkNewRSAnnotations(c, ns, deploymentName, createAnnotation) err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// 4. Update the deploymentRollback to roll back to the last revision // 4. Update the deploymentRollback to roll back to the last revision
revision = 0 revision = 0
Logf("rolling back deployment %s to last revision", deploymentName) framework.Logf("rolling back deployment %s to last revision", deploymentName)
rollback = newDeploymentRollback(deploymentName, nil, revision) rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 4 // Wait for it to be updated to revision 4
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "update", after the rollback // Current newRS annotation should be "update", after the rollback
err = checkNewRSAnnotations(c, ns, deploymentName, updateAnnotation) err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
@ -752,7 +753,7 @@ func testRollbackDeployment(f *Framework) {
// becomes v3. Then rolling back the deployment to v10 (which doesn't exist in history) should fail. // becomes v3. Then rolling back the deployment to v10 (which doesn't exist in history) should fail.
// Finally, rolling back the deployment (v3) to v3 should be a no-op. // Finally, rolling back the deployment (v3) to v3 should be a no-op.
// TODO: Once we have finished reporting rollback status in deployment status, check the rollback status here in each case. // TODO: Once we have finished reporting rollback status in deployment status, check the rollback status here in each case.
func testRollbackDeploymentRSNoRevision(f *Framework) { func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := adapter.FromUnversionedClient(f.Client) c := adapter.FromUnversionedClient(f.Client)
podName := "nginx" podName := "nginx"
@ -776,17 +777,17 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
deploymentReplicas := 1 deploymentReplicas := 1
deploymentImage := nginxImage deploymentImage := nginxImage
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
_, err = c.Extensions().Deployments(ns).Create(d) _, err = c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Check that the replica set we created still doesn't contain revision information // Check that the replica set we created still doesn't contain revision information
@ -797,13 +798,13 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
// 2. Update the deploymentRollback to roll back to the last revision // 2. Update the deploymentRollback to roll back to the last revision
// Since there's only 1 revision in history, it should stay as revision 1 // Since there's only 1 revision in history, it should stay as revision 1
revision := int64(0) revision := int64(0)
Logf("rolling back deployment %s to last revision", deploymentName) framework.Logf("rolling back deployment %s to last revision", deploymentName)
rollback := newDeploymentRollback(deploymentName, nil, revision) rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for the deployment to start rolling back // Wait for the deployment to start rolling back
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: report RollbackRevisionNotFound in deployment status and check it here // TODO: report RollbackRevisionNotFound in deployment status and check it here
@ -814,53 +815,53 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
// 3. Update the deployment to create redis pods. // 3. Update the deployment to create redis pods.
updatedDeploymentImage := redisImage updatedDeploymentImage := redisImage
updatedDeploymentImageName := redisImageName updatedDeploymentImageName := redisImageName
deployment, err := updateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Use observedGeneration to determine if the controller noticed the pod template update. // Use observedGeneration to determine if the controller noticed the pod template update.
err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 2 // Wait for it to be updated to revision 2
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// 4. Update the deploymentRollback to roll back to revision 1 // 4. Update the deploymentRollback to roll back to revision 1
revision = 1 revision = 1
Logf("rolling back deployment %s to revision %d", deploymentName, revision) framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision) rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for the deployment to start rolling back // Wait for the deployment to start rolling back
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: report RollbackDone in deployment status and check it here // TODO: report RollbackDone in deployment status and check it here
// The pod template should be updated to the one in revision 1 // The pod template should be updated to the one in revision 1
// Wait for it to be updated to revision 3 // Wait for it to be updated to revision 3
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// 5. Update the deploymentRollback to roll back to revision 10 // 5. Update the deploymentRollback to roll back to revision 10
// Since there's no revision 10 in history, it should stay as revision 3 // Since there's no revision 10 in history, it should stay as revision 3
revision = 10 revision = 10
Logf("rolling back deployment %s to revision %d", deploymentName, revision) framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision) rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for the deployment to start rolling back // Wait for the deployment to start rolling back
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: report RollbackRevisionNotFound in deployment status and check it here // TODO: report RollbackRevisionNotFound in deployment status and check it here
@ -871,13 +872,13 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
// 6. Update the deploymentRollback to roll back to revision 3 // 6. Update the deploymentRollback to roll back to revision 3
// Since it's already at revision 3, this should be a no-op // Since it's already at revision 3, this should be a no-op
revision = 3 revision = 3
Logf("rolling back deployment %s to revision %d", deploymentName, revision) framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision) rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback) err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for the deployment to start rolling back // Wait for the deployment to start rolling back
err = waitForDeploymentRollbackCleared(c, ns, deploymentName) err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: report RollbackTemplateUnchanged in deployment status and check it here // TODO: report RollbackTemplateUnchanged in deployment status and check it here
@ -886,7 +887,7 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage) checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)
} }
func testDeploymentLabelAdopted(f *Framework) { func testDeploymentLabelAdopted(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some // TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned#Client. // functions like verifyPod still expect an unversioned#Client.
@ -902,25 +903,25 @@ func testDeploymentLabelAdopted(f *Framework) {
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image)) _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up. // Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, 3) err = framework.VerifyPods(unversionedClient, ns, podName, false, 3)
if err != nil { if err != nil {
Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// Create a nginx deployment to adopt the old rs. // Create a nginx deployment to adopt the old rs.
deploymentName := "test-adopted-deployment" deploymentName := "test-adopted-deployment"
Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, podName, image, extensions.RollingUpdateDeploymentStrategyType, nil)) _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, podName, image, extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer stopDeployment(c, f.Client, ns, deploymentName) defer stopDeployment(c, f.Client, ns, deploymentName)
// Wait for it to be updated to revision 1 // Wait for it to be updated to revision 1
err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image) err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", image)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment // The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0) err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// There should be no old RSs (overlapping RS) // There should be no old RSs (overlapping RS)
@ -933,7 +934,7 @@ func testDeploymentLabelAdopted(f *Framework) {
// New RS should contain pod-template-hash in its selector, label, and template label // New RS should contain pod-template-hash in its selector, label, and template label
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = checkRSHashLabel(newRS) err = framework.CheckRSHashLabel(newRS)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods // All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
@ -941,7 +942,7 @@ func testDeploymentLabelAdopted(f *Framework) {
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
pods, err := c.Core().Pods(ns).List(options) pods, err := c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = checkPodHashLabel(pods) err = framework.CheckPodHashLabel(pods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).Should(Equal(replicas)) Expect(len(pods.Items)).Should(Equal(replicas))
} }


@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
) )
const dnsTestPodHostName = "dns-querier-1" const dnsTestPodHostName = "dns-querier-1"
@ -150,9 +151,9 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, fileNameP
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) { func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
var failed []string var failed []string
expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
failed = []string{} failed = []string{}
subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, client) subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -175,20 +176,20 @@ func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *
Do().Raw() Do().Raw()
} }
if err != nil { if err != nil {
Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err) framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
failed = append(failed, fileName) failed = append(failed, fileName)
} }
} }
if len(failed) == 0 { if len(failed) == 0 {
return true, nil return true, nil
} }
Logf("Lookups using %s failed for: %v\n", pod.Name, failed) framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
return false, nil return false, nil
})) }))
Expect(len(failed)).To(Equal(0)) Expect(len(failed)).To(Equal(0))
} }
func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) { func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
podClient := f.Client.Pods(f.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
@ -198,15 +199,15 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) {
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create %s pod: %v", pod.Name, err) framework.Failf("Failed to create %s pod: %v", pod.Name, err)
} }
expectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod") By("retrieving the pod")
pod, err := podClient.Get(pod.Name) pod, err := podClient.Get(pod.Name)
if err != nil { if err != nil {
Failf("Failed to get pod %s: %v", pod.Name, err) framework.Failf("Failed to get pod %s: %v", pod.Name, err)
} }
// Try to find results for each expected name. // Try to find results for each expected name.
By("looking for the results for each expected name from probiers") By("looking for the results for each expected name from probiers")
@ -214,21 +215,21 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) {
// TODO: probe from the host, too. // TODO: probe from the host, too.
Logf("DNS probes using %s succeeded\n", pod.Name) framework.Logf("DNS probes using %s succeeded\n", pod.Name)
} }
func verifyDNSPodIsRunning(f *Framework) { func verifyDNSPodIsRunning(f *framework.Framework) {
systemClient := f.Client.Pods(api.NamespaceSystem) systemClient := f.Client.Pods(api.NamespaceSystem)
By("Waiting for DNS Service to be Running") By("Waiting for DNS Service to be Running")
options := api.ListOptions{LabelSelector: dnsServiceLabelSelector} options := api.ListOptions{LabelSelector: dnsServiceLabelSelector}
dnsPods, err := systemClient.List(options) dnsPods, err := systemClient.List(options)
if err != nil { if err != nil {
Failf("Failed to list all dns service pods") framework.Failf("Failed to list all dns service pods")
} }
if len(dnsPods.Items) != 1 { if len(dnsPods.Items) != 1 {
Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLabelSelector.String()) framework.Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLabelSelector.String())
} }
expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
} }
func createServiceSpec(serviceName string, isHeadless bool, selector map[string]string) *api.Service { func createServiceSpec(serviceName string, isHeadless bool, selector map[string]string) *api.Service {
@ -249,8 +250,8 @@ func createServiceSpec(serviceName string, isHeadless bool, selector map[string]
return headlessService return headlessService
} }
var _ = KubeDescribe("DNS", func() { var _ = framework.KubeDescribe("DNS", func() {
f := NewDefaultFramework("dns") f := framework.NewDefaultFramework("dns")
It("should provide DNS for the cluster [Conformance]", func() { It("should provide DNS for the cluster [Conformance]", func() {
verifyDNSPodIsRunning(f) verifyDNSPodIsRunning(f)
@ -264,7 +265,7 @@ var _ = KubeDescribe("DNS", func() {
"google.com", "google.com",
} }
// Added due to #8512. This is critical for GCE and GKE deployments. // Added due to #8512. This is critical for GCE and GKE deployments.
if providerIs("gce", "gke") { if framework.ProviderIs("gce", "gke") {
namesToResolve = append(namesToResolve, "metadata") namesToResolve = append(namesToResolve, "metadata")
} }


@ -20,22 +20,23 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
var _ = KubeDescribe("Docker Containers", func() { var _ = framework.KubeDescribe("Docker Containers", func() {
framework := NewDefaultFramework("containers") f := framework.NewDefaultFramework("containers")
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
}) })
It("should use the image defaults if command and args are blank [Conformance]", func() { It("should use the image defaults if command and args are blank [Conformance]", func() {
testContainerOutput("use defaults", c, entrypointTestPod(), 0, []string{ framework.TestContainerOutput("use defaults", c, entrypointTestPod(), 0, []string{
"[/ep default arguments]", "[/ep default arguments]",
}, ns) }, ns)
}) })
@ -44,7 +45,7 @@ var _ = KubeDescribe("Docker Containers", func() {
pod := entrypointTestPod() pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"} pod.Spec.Containers[0].Args = []string{"override", "arguments"}
testContainerOutput("override arguments", c, pod, 0, []string{ framework.TestContainerOutput("override arguments", c, pod, 0, []string{
"[/ep override arguments]", "[/ep override arguments]",
}, ns) }, ns)
}) })
@ -55,7 +56,7 @@ var _ = KubeDescribe("Docker Containers", func() {
pod := entrypointTestPod() pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"} pod.Spec.Containers[0].Command = []string{"/ep-2"}
testContainerOutput("override command", c, pod, 0, []string{ framework.TestContainerOutput("override command", c, pod, 0, []string{
"[/ep-2]", "[/ep-2]",
}, ns) }, ns)
}) })
@ -65,7 +66,7 @@ var _ = KubeDescribe("Docker Containers", func() {
pod.Spec.Containers[0].Command = []string{"/ep-2"} pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"} pod.Spec.Containers[0].Args = []string{"override", "arguments"}
testContainerOutput("override all", c, pod, 0, []string{ framework.TestContainerOutput("override all", c, pod, 0, []string{
"[/ep-2 override arguments]", "[/ep-2 override arguments]",
}, ns) }, ns)
}) })


@ -21,12 +21,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
var _ = KubeDescribe("Downward API", func() { var _ = framework.KubeDescribe("Downward API", func() {
framework := NewDefaultFramework("downward-api") f := framework.NewDefaultFramework("downward-api")
It("should provide pod name and namespace as env vars [Conformance]", func() { It("should provide pod name and namespace as env vars [Conformance]", func() {
podName := "downward-api-" + string(util.NewUUID()) podName := "downward-api-" + string(util.NewUUID())
@ -53,10 +54,10 @@ var _ = KubeDescribe("Downward API", func() {
expectations := []string{ expectations := []string{
fmt.Sprintf("POD_NAME=%v", podName), fmt.Sprintf("POD_NAME=%v", podName),
fmt.Sprintf("POD_NAMESPACE=%v", framework.Namespace.Name), fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
} }
testDownwardAPI(framework, podName, env, expectations) testDownwardAPI(f, podName, env, expectations)
}) })
It("should provide pod IP as an env var", func() { It("should provide pod IP as an env var", func() {
@ -77,11 +78,11 @@ var _ = KubeDescribe("Downward API", func() {
"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)", "POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
} }
testDownwardAPI(framework, podName, env, expectations) testDownwardAPI(f, podName, env, expectations)
}) })
}) })
func testDownwardAPI(framework *Framework, podName string, env []api.EnvVar, expectations []string) { func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: podName, Name: podName,
@ -100,5 +101,5 @@ func testDownwardAPI(framework *Framework, podName string, env []api.EnvVar, exp
}, },
} }
framework.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
} }


@ -22,21 +22,22 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Downward API volume", func() { var _ = framework.KubeDescribe("Downward API volume", func() {
// How long to wait for pod logs to be displayed // How long to wait for pod logs to be displayed
const podLogTimeout = 45 * time.Second const podLogTimeout = 45 * time.Second
f := NewDefaultFramework("downward-api") f := framework.NewDefaultFramework("downward-api")
It("should provide podname only [Conformance]", func() { It("should provide podname only [Conformance]", func() {
podName := "downwardapi-volume-" + string(util.NewUUID()) podName := "downwardapi-volume-" + string(util.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname") pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
testContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ framework.TestContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}, f.Namespace.Name) }, f.Namespace.Name)
}) })
@ -50,7 +51,7 @@ var _ = KubeDescribe("Downward API volume", func() {
RunAsUser: &uid, RunAsUser: &uid,
FSGroup: &gid, FSGroup: &gid,
} }
testContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ framework.TestContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{
fmt.Sprintf("%s\n", podName), fmt.Sprintf("%s\n", podName),
}, f.Namespace.Name) }, f.Namespace.Name)
}) })
@ -71,15 +72,15 @@ var _ = KubeDescribe("Downward API volume", func() {
_, err := f.Client.Pods(f.Namespace.Name).Create(pod) _, err := f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) { Eventually(func() (string, error) {
return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, poll).Should(ContainSubstring("key1=\"value1\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
// modify labels // modify labels
pod.Labels["key3"] = "value3" pod.Labels["key3"] = "value3"
@ -88,9 +89,9 @@ var _ = KubeDescribe("Downward API volume", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) { Eventually(func() (string, error) {
return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, poll).Should(ContainSubstring("key3=\"value3\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
}) })
@ -108,15 +109,15 @@ var _ = KubeDescribe("Downward API volume", func() {
By("Creating the pod") By("Creating the pod")
_, err := f.Client.Pods(f.Namespace.Name).Create(pod) _, err := f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) { Eventually(func() (string, error) {
return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, poll).Should(ContainSubstring("builder=\"bar\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
// modify annotations // modify annotations
pod.Annotations["builder"] = "foo" pod.Annotations["builder"] = "foo"
@ -125,9 +126,9 @@ var _ = KubeDescribe("Downward API volume", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) { Eventually(func() (string, error) {
return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
}, },
podLogTimeout, poll).Should(ContainSubstring("builder=\"foo\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
}) })
}) })


@ -36,6 +36,7 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -99,19 +100,19 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Delete any namespaces except default and kube-system. This ensures no // Delete any namespaces except default and kube-system. This ensures no
// lingering resources are left over from a previous test run. // lingering resources are left over from a previous test run.
if testContext.CleanStart { if framework.TestContext.CleanStart {
c, err := loadClient() c, err := framework.LoadClient()
if err != nil { if err != nil {
glog.Fatal("Error loading client: ", err) glog.Fatal("Error loading client: ", err)
} }
deleted, err := deleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault}) deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault})
if err != nil { if err != nil {
Failf("Error deleting orphaned namespaces: %v", err) framework.Failf("Error deleting orphaned namespaces: %v", err)
} }
glog.Infof("Waiting for deletion of the following namespaces: %v", deleted) glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := waitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
} }
} }
@ -119,15 +120,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// cluster infrastructure pods that are being pulled or started can block // cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and // test pods from running, and tests that ensure all pods are running and
// ready will fail). // ready will fail).
if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil { if err := framework.WaitForPodsRunningReady(api.NamespaceSystem, framework.TestContext.MinStartupPods, podStartupTimeout); err != nil {
if c, errClient := loadClient(); errClient != nil { if c, errClient := framework.LoadClient(); errClient != nil {
Logf("Unable to dump cluster information because: %v", errClient) framework.Logf("Unable to dump cluster information because: %v", errClient)
} else { } else {
dumpAllNamespaceInfo(c, api.NamespaceSystem) framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
} }
logFailedContainers(api.NamespaceSystem) framework.LogFailedContainers(api.NamespaceSystem)
runKubernetesServiceTestContainer(testContext.RepoRoot, api.NamespaceDefault) framework.RunKubernetesServiceTestContainer(framework.TestContext.RepoRoot, api.NamespaceDefault)
Failf("Error waiting for all pods to be running and ready: %v", err) framework.Failf("Error waiting for all pods to be running and ready: %v", err)
} }
return nil return nil
@ -188,7 +189,7 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
}, func() { }, func() {
// Run only Ginkgo on node 1 // Run only Ginkgo on node 1
if framework.TestContext.ReportDir != "" { if framework.TestContext.ReportDir != "" {
CoreDump(framework.TestContext.ReportDir) framework.CoreDump(framework.TestContext.ReportDir)
} }
}) })
@ -225,6 +226,6 @@ func RunE2ETests(t *testing.T) {
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)))) r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
} }
} }
glog.Infof("Starting e2e run %q on Ginkgo node %d", runId, config.GinkgoConfig.ParallelNode) glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
} }


@ -18,10 +18,12 @@ package e2e
import ( import (
"testing" "testing"
"k8s.io/kubernetes/test/e2e/framework"
) )
func init() { func init() {
RegisterFlags() framework.RegisterFlags()
} }
func TestE2E(t *testing.T) { func TestE2E(t *testing.T) {


@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
@ -33,9 +34,9 @@ const (
testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3" testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3"
) )
var _ = KubeDescribe("EmptyDir volumes", func() { var _ = framework.KubeDescribe("EmptyDir volumes", func() {
f := NewDefaultFramework("emptydir") f := framework.NewDefaultFramework("emptydir")
Context("when FSGroup is specified [Feature:FSGroup]", func() { Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() { It("new files should be created with FSGroup ownership when container is root", func() {
@ -117,7 +118,7 @@ const (
volumeName = "test-volume" volumeName = "test-volume"
) )
func doTestSetgidFSGroup(f *Framework, image string, medium api.StorageMedium) { func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
@ -147,7 +148,7 @@ func doTestSetgidFSGroup(f *Framework, image string, medium api.StorageMedium) {
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTestVolumeModeFSGroup(f *Framework, image string, medium api.StorageMedium) { func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
source = &api.EmptyDirVolumeSource{Medium: medium} source = &api.EmptyDirVolumeSource{Medium: medium}
@ -172,7 +173,7 @@ func doTestVolumeModeFSGroup(f *Framework, image string, medium api.StorageMediu
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0644FSGroup(f *Framework, image string, medium api.StorageMedium) { func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
@ -200,7 +201,7 @@ func doTest0644FSGroup(f *Framework, image string, medium api.StorageMedium) {
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTestVolumeMode(f *Framework, image string, medium api.StorageMedium) { func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
source = &api.EmptyDirVolumeSource{Medium: medium} source = &api.EmptyDirVolumeSource{Medium: medium}
@ -222,7 +223,7 @@ func doTestVolumeMode(f *Framework, image string, medium api.StorageMedium) {
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0644(f *Framework, image string, medium api.StorageMedium) { func doTest0644(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
@ -247,7 +248,7 @@ func doTest0644(f *Framework, image string, medium api.StorageMedium) {
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0666(f *Framework, image string, medium api.StorageMedium) { func doTest0666(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
@ -272,7 +273,7 @@ func doTest0666(f *Framework, image string, medium api.StorageMedium) {
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0777(f *Framework, image string, medium api.StorageMedium) { func doTest0777(f *framework.Framework, image string, medium api.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")


@ -20,6 +20,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
"strconv" "strconv"
@ -28,8 +29,8 @@ import (
// This test will create a pod with a secret volume and gitRepo volume // This test will create a pod with a secret volume and gitRepo volume
// Thus it requires a secret, a git server pod, and a git server service // Thus it requires a secret, a git server pod, and a git server service
var _ = KubeDescribe("EmptyDir wrapper volumes", func() { var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
f := NewDefaultFramework("emptydir-wrapper") f := framework.NewDefaultFramework("emptydir-wrapper")
It("should becomes running", func() { It("should becomes running", func() {
name := "emptydir-wrapper-test-" + string(util.NewUUID()) name := "emptydir-wrapper-test-" + string(util.NewUUID())
@ -48,7 +49,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() {
var err error var err error
if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
gitServerPodName := "git-server-" + string(util.NewUUID()) gitServerPodName := "git-server-" + string(util.NewUUID())
@ -76,7 +77,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() {
} }
if gitServerPod, err = f.Client.Pods(f.Namespace.Name).Create(gitServerPod); err != nil { if gitServerPod, err = f.Client.Pods(f.Namespace.Name).Create(gitServerPod); err != nil {
Failf("unable to create test git server pod %s: %v", gitServerPod.Name, err) framework.Failf("unable to create test git server pod %s: %v", gitServerPod.Name, err)
} }
// Portal IP and port // Portal IP and port
@ -99,7 +100,7 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() {
} }
if gitServerSvc, err = f.Client.Services(f.Namespace.Name).Create(gitServerSvc); err != nil { if gitServerSvc, err = f.Client.Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
} }
gitVolumeName := "git-volume" gitVolumeName := "git-volume"
@ -152,28 +153,28 @@ var _ = KubeDescribe("EmptyDir wrapper volumes", func() {
} }
if pod, err = f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { if pod, err = f.Client.Pods(f.Namespace.Name).Create(pod); err != nil {
Failf("unable to create pod %v: %v", pod.Name, err) framework.Failf("unable to create pod %v: %v", pod.Name, err)
} }
defer func() { defer func() {
By("Cleaning up the secret") By("Cleaning up the secret")
if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil {
Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
By("Cleaning up the git server pod") By("Cleaning up the git server pod")
if err = f.Client.Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil { if err = f.Client.Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil {
Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
} }
By("Cleaning up the git server svc") By("Cleaning up the git server svc")
if err = f.Client.Services(f.Namespace.Name).Delete(gitServerSvc.Name); err != nil { if err = f.Client.Services(f.Namespace.Name).Delete(gitServerSvc.Name); err != nil {
Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
} }
By("Cleaning up the git vol pod") By("Cleaning up the git vol pod")
if err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { if err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
Failf("unable to delete git vol pod %v: %v", pod.Name, err) framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
} }
}() }()
expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
}) })
}) })


@ -25,19 +25,20 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() { var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() {
f := NewDefaultFramework("es-logging") f := framework.NewDefaultFramework("es-logging")
BeforeEach(func() { BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch // TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging // on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers, we should widen the scope of this test. // works for other providers, we should widen the scope of this test.
SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
}) })
It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() { It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() {
@ -54,7 +55,7 @@ const (
func bodyToJSON(body []byte) (map[string]interface{}, error) { func bodyToJSON(body []byte) (map[string]interface{}, error) {
var r map[string]interface{} var r map[string]interface{}
if err := json.Unmarshal(body, &r); err != nil { if err := json.Unmarshal(body, &r); err != nil {
Logf("Bad JSON: %s", string(body)) framework.Logf("Bad JSON: %s", string(body))
return nil, fmt.Errorf("failed to unmarshal Elasticsearch response: %v", err) return nil, fmt.Errorf("failed to unmarshal Elasticsearch response: %v", err)
} }
return r, nil return r, nil
@ -70,7 +71,7 @@ func nodeInNodeList(nodeName string, nodeList *api.NodeList) bool {
} }
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging. // ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *Framework) { func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
// graceTime is how long to keep retrying requests for status information. // graceTime is how long to keep retrying requests for status information.
const graceTime = 5 * time.Minute const graceTime = 5 * time.Minute
// ingestionTimeout is how long to keep retrying to wait for all the // ingestionTimeout is how long to keep retrying to wait for all the
@ -87,7 +88,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
if _, err = s.Get("elasticsearch-logging"); err == nil { if _, err = s.Get("elasticsearch-logging"); err == nil {
break break
} }
Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -98,7 +99,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
pods, err := f.Client.Pods(api.NamespaceSystem).List(options) pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
@ -109,9 +110,9 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
err = nil err = nil
var body []byte var body []byte
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) { for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue continue
} }
// Query against the root URL for Elasticsearch. // Query against the root URL for Elasticsearch.
@ -119,42 +120,42 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
Name("elasticsearch-logging"). Name("elasticsearch-logging").
DoRaw() DoRaw()
if err != nil { if err != nil {
Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err) framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
continue continue
} }
esResponse, err = bodyToJSON(body) esResponse, err = bodyToJSON(body)
if err != nil { if err != nil {
Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err) framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
continue continue
} }
statusIntf, ok := esResponse["status"] statusIntf, ok := esResponse["status"]
if !ok { if !ok {
Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse) framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
continue continue
} }
statusCode, ok = statusIntf.(float64) statusCode, ok = statusIntf.(float64)
if !ok { if !ok {
// Assume this is a string returning Failure. Retry. // Assume this is a string returning Failure. Retry.
Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf) framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
continue continue
} }
if int(statusCode) != 200 { if int(statusCode) != 200 {
Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode) framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
continue continue
} }
break break
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if int(statusCode) != 200 { if int(statusCode) != 200 {
Failf("Elasticsearch cluster has a bad status: %v", statusCode) framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
} }
// Check to see if we have a cluster_name field. // Check to see if we have a cluster_name field.
clusterName, ok := esResponse["cluster_name"] clusterName, ok := esResponse["cluster_name"]
if !ok { if !ok {
Failf("No cluster_name field in Elasticsearch response: %v", esResponse) framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
} }
if clusterName != "kubernetes-logging" { if clusterName != "kubernetes-logging" {
Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName) framework.Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
} }
// Now assume we really are talking to an Elasticsearch instance. // Now assume we really are talking to an Elasticsearch instance.
@ -162,9 +163,9 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
By("Checking health of Elasticsearch service.") By("Checking health of Elasticsearch service.")
healthy := false healthy := false
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue continue
} }
body, err = proxyRequest.Namespace(api.NamespaceSystem). body, err = proxyRequest.Namespace(api.NamespaceSystem).
@ -177,17 +178,17 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
} }
health, err := bodyToJSON(body) health, err := bodyToJSON(body)
if err != nil { if err != nil {
Logf("Bad json response from elasticsearch: %v", err) framework.Logf("Bad json response from elasticsearch: %v", err)
continue continue
} }
statusIntf, ok := health["status"] statusIntf, ok := health["status"]
if !ok { if !ok {
Logf("No status field found in cluster health response: %v", health) framework.Logf("No status field found in cluster health response: %v", health)
continue continue
} }
status := statusIntf.(string) status := statusIntf.(string)
if status != "green" && status != "yellow" { if status != "green" && status != "yellow" {
Logf("Cluster health has bad status: %v", health) framework.Logf("Cluster health has bad status: %v", health)
continue continue
} }
if err == nil && ok { if err == nil && ok {
@ -196,27 +197,27 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
} }
} }
if !healthy { if !healthy {
Failf("After %v elasticsearch cluster is not healthy", graceTime) framework.Failf("After %v elasticsearch cluster is not healthy", graceTime)
} }
// Obtain a list of nodes so we can place one synthetic logger on each node. // Obtain a list of nodes so we can place one synthetic logger on each node.
nodes := ListSchedulableNodesOrDie(f.Client) nodes := framework.ListSchedulableNodesOrDie(f.Client)
nodeCount := len(nodes.Items) nodeCount := len(nodes.Items)
if nodeCount == 0 { if nodeCount == 0 {
Failf("Failed to find any nodes") framework.Failf("Failed to find any nodes")
} }
Logf("Found %d nodes.", len(nodes.Items)) framework.Logf("Found %d nodes.", len(nodes.Items))
// Filter out unhealthy nodes. // Filter out unhealthy nodes.
// Previous tests may have caused failures of some nodes. Let's skip // Previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test). // 'Not Ready' nodes, just in case (there is no need to fail the test).
filterNodes(nodes, func(node api.Node) bool { framework.FilterNodes(nodes, func(node api.Node) bool {
return isNodeConditionSetAsExpected(&node, api.NodeReady, true) return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
}) })
if len(nodes.Items) < 2 { if len(nodes.Items) < 2 {
Failf("Less than two nodes were found Ready: %d", len(nodes.Items)) framework.Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
} }
Logf("Found %d healthy nodes.", len(nodes.Items)) framework.Logf("Found %d healthy nodes.", len(nodes.Items))
// Wait for the Fluentd pods to enter the running state. // Wait for the Fluentd pods to enter the running state.
By("Checking to make sure the Fluentd pod are running on each healthy node") By("Checking to make sure the Fluentd pod are running on each healthy node")
@ -226,7 +227,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pod := range fluentdPods.Items { for _, pod := range fluentdPods.Items {
if nodeInNodeList(pod.Spec.NodeName, nodes) { if nodeInNodeList(pod.Spec.NodeName, nodes) {
err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
} }
@ -241,7 +242,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
} }
} }
if !exists { if !exists {
Failf("Node %v does not have fluentd pod running on it.", node.Name) framework.Failf("Node %v does not have fluentd pod running on it.", node.Name)
} }
} }
@ -253,7 +254,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
// Form a unique name to taint log lines to be collected. // Form a unique name to taint log lines to be collected.
// Replace '-' characters with '_' to prevent the analyzer from breaking apart names. // Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
taintName := strings.Replace(ns+name, "-", "_", -1) taintName := strings.Replace(ns+name, "-", "_", -1)
Logf("Tainting log lines with %v", taintName) framework.Logf("Tainting log lines with %v", taintName)
// podNames records the names of the synthetic logging pods that are created in the // podNames records the names of the synthetic logging pods that are created in the
// loop below. // loop below.
var podNames []string var podNames []string
@ -288,7 +289,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
defer func() { defer func() {
for _, pod := range podNames { for _, pod := range podNames {
if err = f.Client.Pods(ns).Delete(pod, nil); err != nil { if err = f.Client.Pods(ns).Delete(pod, nil); err != nil {
Logf("Failed to delete pod %s: %v", pod, err) framework.Logf("Failed to delete pod %s: %v", pod, err)
} }
} }
}() }()
@ -296,7 +297,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
// Wait for the synthetic logging pods to finish. // Wait for the synthetic logging pods to finish.
By("Waiting for the pods to succeed.") By("Waiting for the pods to succeed.")
for _, pod := range podNames { for _, pod := range podNames {
err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns) err = framework.WaitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
@ -315,18 +316,18 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
esPods, err := f.Client.Pods(api.NamespaceSystem).List(options) esPods, err := f.Client.Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err) framework.Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
continue continue
} else { } else {
for i, pod := range esPods.Items { for i, pod := range esPods.Items {
Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase, framework.Logf("pod %d: %s PodIP %s phase %s condition %+v", i, pod.Name, pod.Status.PodIP, pod.Status.Phase,
pod.Status.Conditions) pod.Status.Conditions)
} }
} }
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue continue
} }
// Ask Elasticsearch to return all the log lines that were tagged with the underscore // Ask Elasticsearch to return all the log lines that were tagged with the underscore
@ -339,33 +340,33 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
Param("size", strconv.Itoa(2*expected)). Param("size", strconv.Itoa(2*expected)).
DoRaw() DoRaw()
if err != nil { if err != nil {
Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err) framework.Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err)
continue continue
} }
response, err := bodyToJSON(body) response, err := bodyToJSON(body)
if err != nil { if err != nil {
Logf("After %v failed to unmarshal response: %v", time.Since(start), err) framework.Logf("After %v failed to unmarshal response: %v", time.Since(start), err)
Logf("Body: %s", string(body)) framework.Logf("Body: %s", string(body))
continue continue
} }
hits, ok := response["hits"].(map[string]interface{}) hits, ok := response["hits"].(map[string]interface{})
if !ok { if !ok {
Logf("response[hits] not of the expected type: %T", response["hits"]) framework.Logf("response[hits] not of the expected type: %T", response["hits"])
continue continue
} }
totalF, ok := hits["total"].(float64) totalF, ok := hits["total"].(float64)
if !ok { if !ok {
Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"]) framework.Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"])
continue continue
} }
total := int(totalF) total := int(totalF)
if total != expected { if total != expected {
Logf("After %v expecting to find %d log lines but saw %d", time.Since(start), expected, total) framework.Logf("After %v expecting to find %d log lines but saw %d", time.Since(start), expected, total)
} }
h, ok := hits["hits"].([]interface{}) h, ok := hits["hits"].([]interface{})
if !ok { if !ok {
Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"]) framework.Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"])
continue continue
} }
// Initialize data-structure for observing counts. // Initialize data-structure for observing counts.
@ -377,44 +378,44 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
for _, e := range h { for _, e := range h {
l, ok := e.(map[string]interface{}) l, ok := e.(map[string]interface{})
if !ok { if !ok {
Logf("element of hit not of expected type: %T", e) framework.Logf("element of hit not of expected type: %T", e)
continue continue
} }
source, ok := l["_source"].(map[string]interface{}) source, ok := l["_source"].(map[string]interface{})
if !ok { if !ok {
Logf("_source not of the expected type: %T", l["_source"]) framework.Logf("_source not of the expected type: %T", l["_source"])
continue continue
} }
msg, ok := source["log"].(string) msg, ok := source["log"].(string)
if !ok { if !ok {
Logf("log not of the expected type: %T", source["log"]) framework.Logf("log not of the expected type: %T", source["log"])
continue continue
} }
words := strings.Split(msg, " ") words := strings.Split(msg, " ")
if len(words) != 4 { if len(words) != 4 {
Logf("Malformed log line: %s", msg) framework.Logf("Malformed log line: %s", msg)
continue continue
} }
n, err := strconv.ParseUint(words[0], 10, 0) n, err := strconv.ParseUint(words[0], 10, 0)
if err != nil { if err != nil {
Logf("Expecting numer of node as first field of %s", msg) framework.Logf("Expecting numer of node as first field of %s", msg)
continue continue
} }
if n < 0 || int(n) >= nodeCount { if n < 0 || int(n) >= nodeCount {
Logf("Node count index out of range: %d", nodeCount) framework.Logf("Node count index out of range: %d", nodeCount)
continue continue
} }
index, err := strconv.ParseUint(words[2], 10, 0) index, err := strconv.ParseUint(words[2], 10, 0)
if err != nil { if err != nil {
Logf("Expecting number as third field of %s", msg) framework.Logf("Expecting number as third field of %s", msg)
continue continue
} }
if index < 0 || index >= countTo { if index < 0 || index >= countTo {
Logf("Index value out of range: %d", index) framework.Logf("Index value out of range: %d", index)
continue continue
} }
if words[1] != taintName { if words[1] != taintName {
Logf("Elasticsearch query return unexpected log line: %s", msg) framework.Logf("Elasticsearch query return unexpected log line: %s", msg)
continue continue
} }
// Record the observation of a log line from node n at the given index. // Record the observation of a log line from node n at the given index.
@ -431,45 +432,45 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
missingPerNode[n]++ missingPerNode[n]++
} }
if c < 0 || c > 1 { if c < 0 || c > 1 {
Logf("Got incorrect count for node %d index %d: %d", n, i, c) framework.Logf("Got incorrect count for node %d index %d: %d", n, i, c)
incorrectCount = true incorrectCount = true
} }
} }
} }
if incorrectCount { if incorrectCount {
Logf("After %v es still return duplicated log lines", time.Since(start)) framework.Logf("After %v es still return duplicated log lines", time.Since(start))
continue continue
} }
if totalMissing != 0 { if totalMissing != 0 {
Logf("After %v still missing %d log lines", time.Since(start), totalMissing) framework.Logf("After %v still missing %d log lines", time.Since(start), totalMissing)
continue continue
} }
Logf("After %s found all %d log lines", time.Since(start), expected) framework.Logf("After %s found all %d log lines", time.Since(start), expected)
return return
} }
for n := range missingPerNode { for n := range missingPerNode {
if missingPerNode[n] > 0 { if missingPerNode[n] > 0 {
Logf("Node %d %s is missing %d logs", n, nodes.Items[n].Name, missingPerNode[n]) framework.Logf("Node %d %s is missing %d logs", n, nodes.Items[n].Name, missingPerNode[n])
opts := &api.PodLogOptions{} opts := &api.PodLogOptions{}
body, err = f.Client.Pods(ns).GetLogs(podNames[n], opts).DoRaw() body, err = f.Client.Pods(ns).GetLogs(podNames[n], opts).DoRaw()
if err != nil { if err != nil {
Logf("Cannot get logs from pod %v", podNames[n]) framework.Logf("Cannot get logs from pod %v", podNames[n])
continue continue
} }
Logf("Pod %s has the following logs: %s", podNames[n], body) framework.Logf("Pod %s has the following logs: %s", podNames[n], body)
for _, pod := range fluentdPods.Items { for _, pod := range fluentdPods.Items {
if pod.Spec.NodeName == nodes.Items[n].Name { if pod.Spec.NodeName == nodes.Items[n].Name {
body, err = f.Client.Pods(api.NamespaceSystem).GetLogs(pod.Name, opts).DoRaw() body, err = f.Client.Pods(api.NamespaceSystem).GetLogs(pod.Name, opts).DoRaw()
if err != nil { if err != nil {
Logf("Cannot get logs from pod %v", pod.Name) framework.Logf("Cannot get logs from pod %v", pod.Name)
break break
} }
Logf("Fluentd Pod %s on node %s has the following logs: %s", pod.Name, nodes.Items[n].Name, body) framework.Logf("Fluentd Pod %s on node %s has the following logs: %s", pod.Name, nodes.Items[n].Name, body)
break break
} }
} }
} }
} }
Failf("Failed to find all %d log lines", expected) framework.Failf("Failed to find all %d log lines", expected)
} }
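The hunk above repeatedly polls the Elasticsearch root URL until the JSON "status" field reports 200, retrying on every failure mode for up to graceTime. Below is a minimal, standard-library-only sketch of that retry shape; the fetch callback, interval, and names are illustrative stand-ins rather than the framework's actual API.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"
)

// waitForStatus200 keeps calling fetch until the returned JSON body has a
// numeric "status" field equal to 200, or graceTime elapses.
func waitForStatus200(fetch func() ([]byte, error), graceTime, interval time.Duration) error {
	lastErr := errors.New("never polled")
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(interval) {
		body, err := fetch()
		if err != nil {
			lastErr = fmt.Errorf("after %v the call failed: %v", time.Since(start), err)
			continue
		}
		var resp map[string]interface{}
		if err := json.Unmarshal(body, &resp); err != nil {
			lastErr = fmt.Errorf("after %v the response was not JSON: %v", time.Since(start), err)
			continue
		}
		// encoding/json decodes JSON numbers into float64, which is why the
		// test above type-asserts the status field to float64 as well.
		status, ok := resp["status"].(float64)
		if !ok || int(status) != 200 {
			lastErr = fmt.Errorf("after %v bad status: %v", time.Since(start), resp["status"])
			continue
		}
		return nil
	}
	return lastErr
}

func main() {
	calls := 0
	fetch := func() ([]byte, error) {
		calls++
		if calls < 3 {
			return []byte(`{"status": "Failure"}`), nil // not ready yet
		}
		return []byte(`{"status": 200, "cluster_name": "kubernetes-logging"}`), nil
	}
	err := waitForStatus200(fetch, time.Minute, 10*time.Millisecond)
	fmt.Println("healthy:", err == nil)
}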

View File

@ -22,14 +22,15 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Etcd failure [Disruptive]", func() { var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {
framework := NewDefaultFramework("etcd-failure") f := framework.NewDefaultFramework("etcd-failure")
BeforeEach(func() { BeforeEach(func() {
// This test requires: // This test requires:
@ -37,12 +38,12 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() {
// - master access // - master access
// ... so the provider check should be identical to the intersection of // ... so the provider check should be identical to the intersection of
// providers that provide those capabilities. // providers that provide those capabilities.
SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
Expect(RunRC(RCConfig{ Expect(framework.RunRC(framework.RCConfig{
Client: framework.Client, Client: f.Client,
Name: "baz", Name: "baz",
Namespace: framework.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Replicas: 1, Replicas: 1,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
@ -50,7 +51,7 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() {
It("should recover from network partition with master", func() { It("should recover from network partition with master", func() {
etcdFailTest( etcdFailTest(
framework, f,
"sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP", "sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP",
"sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP", "sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP",
) )
@ -58,19 +59,19 @@ var _ = KubeDescribe("Etcd failure [Disruptive]", func() {
It("should recover from SIGKILL", func() { It("should recover from SIGKILL", func() {
etcdFailTest( etcdFailTest(
framework, f,
"pgrep etcd | xargs -I {} sudo kill -9 {}", "pgrep etcd | xargs -I {} sudo kill -9 {}",
"echo 'do nothing. monit should restart etcd.'", "echo 'do nothing. monit should restart etcd.'",
) )
}) })
}) })
func etcdFailTest(framework *Framework, failCommand, fixCommand string) { func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand) doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(framework) checkExistingRCRecovers(f)
ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
} }
// For this duration, etcd will be failed by executing a failCommand on the master. // For this duration, etcd will be failed by executing a failCommand on the master.
@ -89,25 +90,25 @@ func doEtcdFailure(failCommand, fixCommand string) {
} }
func masterExec(cmd string) { func masterExec(cmd string) {
result, err := SSH(cmd, getMasterHost()+":22", testContext.Provider) result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if result.Code != 0 { if result.Code != 0 {
LogSSHResult(result) framework.LogSSHResult(result)
Failf("master exec command returned non-zero") framework.Failf("master exec command returned non-zero")
} }
} }
func checkExistingRCRecovers(f *Framework) { func checkExistingRCRecovers(f *framework.Framework) {
By("assert that the pre-existing replication controller recovers") By("assert that the pre-existing replication controller recovers")
podClient := f.Client.Pods(f.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector() rcSelector := labels.Set{"name": "baz"}.AsSelector()
By("deleting pods from existing replication controller") By("deleting pods from existing replication controller")
expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := api.ListOptions{LabelSelector: rcSelector} options := api.ListOptions{LabelSelector: rcSelector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
Logf("apiserver returned error, as expected before recovery: %v", err) framework.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil return false, nil
} }
if len(pods.Items) == 0 { if len(pods.Items) == 0 {
@ -117,12 +118,12 @@ func checkExistingRCRecovers(f *Framework) {
err = podClient.Delete(pod.Name, api.NewDeleteOptions(0)) err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
Logf("apiserver has recovered") framework.Logf("apiserver has recovered")
return true, nil return true, nil
})) }))
By("waiting for replication controller to recover") By("waiting for replication controller to recover")
expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := api.ListOptions{LabelSelector: rcSelector} options := api.ListOptions{LabelSelector: rcSelector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
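checkExistingRCRecovers above wraps wait.Poll in framework.ExpectNoError: the poll body returns (done bool, err error), where a nil error with done=false means "keep retrying" and a timeout becomes a test failure. The standalone sketch below imitates only that contract; it is not the actual k8s.io/kubernetes/pkg/util/wait implementation (which, among other details, may delay the first check by one interval).

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll imitates the contract the tests rely on: run condition every interval
// until it reports done, returns an error, or timeout expires.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err // a non-nil error aborts the wait immediately
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
		attempts++
		// Pretend the replication controller has recreated its pod on the
		// third poll, the way the etcd test above waits for recovery.
		return attempts >= 3, nil
	})
	fmt.Println("recovered:", err == nil, "after", attempts, "attempts")
}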

View File

@ -26,17 +26,18 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Events", func() { var _ = framework.KubeDescribe("Events", func() {
framework := NewDefaultFramework("events") f := framework.NewDefaultFramework("events")
It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() { It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "send-events-" + string(util.NewUUID()) name := "send-events-" + string(util.NewUUID())
@ -66,10 +67,10 @@ var _ = KubeDescribe("Events", func() {
podClient.Delete(pod.Name, nil) podClient.Delete(pod.Name, nil)
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@ -80,21 +81,21 @@ var _ = KubeDescribe("Events", func() {
By("retrieving the pod") By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name) podWithUid, err := podClient.Get(pod.Name)
if err != nil { if err != nil {
Failf("Failed to get pod: %v", err) framework.Failf("Failed to get pod: %v", err)
} }
fmt.Printf("%+v\n", podWithUid) fmt.Printf("%+v\n", podWithUid)
var events *api.EventList var events *api.EventList
// Check for scheduler event about the pod. // Check for scheduler event about the pod.
By("checking for scheduler event about the pod") By("checking for scheduler event about the pod")
expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{ selector := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID), "involvedObject.uid": string(podWithUid.UID),
"involvedObject.namespace": framework.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": api.DefaultSchedulerName, "source": api.DefaultSchedulerName,
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
events, err := framework.Client.Events(framework.Namespace.Name).List(options) events, err := f.Client.Events(f.Namespace.Name).List(options)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -106,15 +107,15 @@ var _ = KubeDescribe("Events", func() {
})) }))
// Check for kubelet event about the pod. // Check for kubelet event about the pod.
By("checking for kubelet event about the pod") By("checking for kubelet event about the pod")
expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{ selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID), "involvedObject.uid": string(podWithUid.UID),
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.namespace": framework.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": "kubelet", "source": "kubelet",
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
events, err = framework.Client.Events(framework.Namespace.Name).List(options) events, err = f.Client.Events(f.Namespace.Name).List(options)
if err != nil { if err != nil {
return false, err return false, err
} }
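Read together, the files above follow one post-refactor skeleton: import the framework package explicitly, keep per-test state in a *framework.Framework value named f (so the package name is no longer shadowed by a local variable), and qualify the exported helpers. A condensed sketch of that shape, using only calls that appear in the hunks above; the describe text, namespace prefix, and assertions are placeholders.

package example_test

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example", func() {
	// f replaces the old local variable called "framework", which would now
	// shadow the imported package.
	f := framework.NewDefaultFramework("example")

	It("lists pods with the exported helpers", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		framework.ExpectNoError(err)
		if len(pods.Items) == 0 {
			framework.Logf("no pods yet in namespace %s", f.Namespace.Name)
		}
	})
})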

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -41,17 +42,17 @@ try:
except: except:
print 'err'` print 'err'`
var _ = KubeDescribe("ClusterDns [Feature:Example]", func() { var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
framework := NewDefaultFramework("cluster-dns") f := framework.NewDefaultFramework("cluster-dns")
var c *client.Client var c *client.Client
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
}) })
It("should create pod that uses dns [Conformance]", func() { It("should create pod that uses dns [Conformance]", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples/cluster-dns", file) return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file)
} }
// contrary to the example, this test does not use contexts, for simplicity // contrary to the example, this test does not use contexts, for simplicity
@ -75,22 +76,22 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() {
namespaces := []*api.Namespace{nil, nil} namespaces := []*api.Namespace{nil, nil}
for i := range namespaces { for i := range namespaces {
var err error var err error
namespaces[i], err = framework.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil) namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
for _, ns := range namespaces { for _, ns := range namespaces {
runKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns)) framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
} }
for _, ns := range namespaces { for _, ns := range namespaces {
runKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns)) framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
} }
// wait for objects // wait for objects
for _, ns := range namespaces { for _, ns := range namespaces {
waitForRCPodsRunning(c, ns.Name, backendRcName) framework.WaitForRCPodsRunning(c, ns.Name, backendRcName)
waitForService(c, ns.Name, backendSvcName, true, poll, serviceStartTimeout) framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
} }
// it is not enough that pods are running because they may be set to running, but // it is not enough that pods are running because they may be set to running, but
// the application itself may have not been initialized. Just query the application. // the application itself may have not been initialized. Just query the application.
@ -99,11 +100,11 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() {
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns.Name).List(options) pods, err := c.Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = podsResponding(c, ns.Name, backendPodName, false, pods) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = serviceResponding(c, ns.Name, backendSvcName) err = framework.ServiceResponding(c, ns.Name, backendSvcName)
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond") Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
} }
@ -120,31 +121,31 @@ var _ = KubeDescribe("ClusterDns [Feature:Example]", func() {
pods, err := c.Pods(namespaces[0].Name).List(options) pods, err := c.Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 { if err != nil || pods == nil || len(pods.Items) == 0 {
Failf("no running pods found") framework.Failf("no running pods found")
} }
podName := pods.Items[0].Name podName := pods.Items[0].Name
queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name) queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
_, err = lookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout) _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec") Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name)) updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
// create a pod in each namespace // create a pod in each namespace
for _, ns := range namespaces { for _, ns := range namespaces {
newKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).withStdinData(updatedPodYaml).execOrDie() framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
} }
// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember // wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
// that we cannot wait for the pods to be running because our pods terminate by themselves. // that we cannot wait for the pods to be running because our pods terminate by themselves.
for _, ns := range namespaces { for _, ns := range namespaces {
err := waitForPodNotPending(c, ns.Name, frontendPodName) err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
expectNoError(err) framework.ExpectNoError(err)
} }
// wait for pods to print their result // wait for pods to print their result
for _, ns := range namespaces { for _, ns := range namespaces {
_, err := lookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, podStartTimeout) _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}) })

View File

@ -26,6 +26,7 @@ import (
"time" "time"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -33,7 +34,7 @@ import (
const ( const (
k8bpsContainerVersion = "r.2.8.19" // Container version, see the examples/k8petstore dockerfiles for details. k8bpsContainerVersion = "r.2.8.19" // Container version, see the examples/k8petstore dockerfiles for details.
k8bpsThroughputDummy = "0" // Polling time = 0, since we poll in ginkgo rather than using the shell script tests. k8bpsThroughputDummy = "0" // Polling time = 0, since we poll in ginkgo rather than using the shell script tests.
k8bpsRedisSlaves = "1" // Number of redis slaves. k8bpsRedisSlaves = "1" // Number of redis slaves.
k8bpsDontRunTest = "0" // Don't bother embedded test. k8bpsDontRunTest = "0" // Don't bother embedded test.
k8bpsStartupTimeout = 30 * time.Second // Amount of elapsed time before petstore transactions are being stored. k8bpsStartupTimeout = 30 * time.Second // Amount of elapsed time before petstore transactions are being stored.
@ -47,7 +48,7 @@ const (
// readTransactions reads # of transactions from the k8petstore web server endpoint. // readTransactions reads # of transactions from the k8petstore web server endpoint.
// for more details see the source of the k8petstore web server. // for more details see the source of the k8petstore web server.
func readTransactions(c *client.Client, ns string) (error, int) { func readTransactions(c *client.Client, ns string) (error, int) {
proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get())
if errProxy != nil { if errProxy != nil {
return errProxy, -1 return errProxy, -1
} }
@ -68,11 +69,11 @@ func readTransactions(c *client.Client, ns string) (error, int) {
func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns string, finalTransactionsExpected int, maxTime time.Duration) { func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns string, finalTransactionsExpected int, maxTime time.Duration) {
var err error = nil var err error = nil
k8bpsScriptLocation := filepath.Join(testContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh") k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh")
cmd := exec.Command( cmd := exec.Command(
k8bpsScriptLocation, k8bpsScriptLocation,
testContext.KubectlPath, framework.TestContext.KubectlPath,
k8bpsContainerVersion, k8bpsContainerVersion,
k8bpsThroughputDummy, k8bpsThroughputDummy,
strconv.Itoa(restServers), strconv.Itoa(restServers),
@ -85,25 +86,25 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
Logf("Starting k8petstore application....") framework.Logf("Starting k8petstore application....")
// Run the k8petstore app, and log / fail if it returns any errors. // Run the k8petstore app, and log / fail if it returns any errors.
// This should return quickly, assuming containers are downloaded. // This should return quickly, assuming containers are downloaded.
if err = cmd.Start(); err != nil { if err = cmd.Start(); err != nil {
Failf("%v", err) framework.Failf("%v", err)
} }
// Make sure there are no command errors. // Make sure there are no command errors.
if err = cmd.Wait(); err != nil { if err = cmd.Wait(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok { if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
Logf("Exit Status: %d", status.ExitStatus()) framework.Logf("Exit Status: %d", status.ExitStatus())
} }
} }
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("... Done starting k8petstore ") framework.Logf("... Done starting k8petstore ")
totalTransactions := 0 totalTransactions := 0
Logf("Start polling, timeout is %v seconds", maxTime) framework.Logf("Start polling, timeout is %v seconds", maxTime)
// How long until the FIRST transactions are created. // How long until the FIRST transactions are created.
startupTimeout := time.After(time.Duration(k8bpsStartupTimeout)) startupTimeout := time.After(time.Duration(k8bpsStartupTimeout))
@ -113,18 +114,18 @@ func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns str
tick := time.Tick(2 * time.Second) tick := time.Tick(2 * time.Second)
var ready = false var ready = false
Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout) framework.Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout)
T: T:
for { for {
select { select {
case <-transactionsCompleteTimeout: case <-transactionsCompleteTimeout:
Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected) framework.Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected)
break T break T
case <-tick: case <-tick:
// Don't fail if there's an error. We expect a few failures might happen in the cloud. // Don't fail if there's an error. We expect a few failures might happen in the cloud.
err, totalTransactions = readTransactions(c, ns) err, totalTransactions = readTransactions(c, ns)
if err == nil { if err == nil {
Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions) framework.Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions)
if totalTransactions >= k8bpsMinTransactionsOnStartup { if totalTransactions >= k8bpsMinTransactionsOnStartup {
ready = true ready = true
} }
@ -133,14 +134,14 @@ T:
} }
} else { } else {
if ready { if ready {
Logf("Blip: during polling: %v", err) framework.Logf("Blip: during polling: %v", err)
} else { } else {
Logf("Not ready yet: %v", err) framework.Logf("Not ready yet: %v", err)
} }
} }
case <-startupTimeout: case <-startupTimeout:
if !ready { if !ready {
Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup) framework.Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup)
break T break T
} }
} }
@ -152,19 +153,19 @@ T:
Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected)) Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected))
} }
var _ = KubeDescribe("Pet Store [Feature:Example]", func() { var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
BeforeEach(func() { BeforeEach(func() {
// The shell scripts in k8petstore break on jenkins... Pure golang rewrite is in progress. // The shell scripts in k8petstore break on jenkins... Pure golang rewrite is in progress.
SkipUnlessProviderIs("local") framework.SkipUnlessProviderIs("local")
}) })
// The number of nodes dictates total number of generators/transaction expectations. // The number of nodes dictates total number of generators/transaction expectations.
var nodeCount int var nodeCount int
f := NewDefaultFramework("petstore") f := framework.NewDefaultFramework("petstore")
It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() { It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
nodes := ListSchedulableNodesOrDie(f.Client) nodes := framework.ListSchedulableNodesOrDie(f.Client)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
loadGenerators := nodeCount loadGenerators := nodeCount

View File

@ -28,28 +28,29 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
const ( const (
serverStartTimeout = podStartTimeout + 3*time.Minute serverStartTimeout = framework.PodStartTimeout + 3*time.Minute
) )
var _ = KubeDescribe("[Feature:Example]", func() { var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework := NewDefaultFramework("examples") f := framework.NewDefaultFramework("examples")
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
}) })
KubeDescribe("Redis", func() { framework.KubeDescribe("Redis", func() {
It("should create and stop redis servers", func() { It("should create and stop redis servers", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples/redis", file) return filepath.Join(framework.TestContext.RepoRoot, "examples/redis", file)
} }
bootstrapYaml := mkpath("redis-master.yaml") bootstrapYaml := mkpath("redis-master.yaml")
sentinelServiceYaml := mkpath("redis-sentinel-service.yaml") sentinelServiceYaml := mkpath("redis-sentinel-service.yaml")
@ -64,35 +65,35 @@ var _ = KubeDescribe("[Feature:Example]", func() {
expectedOnSentinel := "+monitor master" expectedOnSentinel := "+monitor master"
By("starting redis bootstrap") By("starting redis bootstrap")
runKubectlOrDie("create", "-f", bootstrapYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
err := waitForPodRunningInNamespace(c, bootstrapPodName, ns) err := framework.WaitForPodRunningInNamespace(c, bootstrapPodName, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = lookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout) _, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = lookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout) _, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("setting up services and controllers") By("setting up services and controllers")
runKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
runKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
runKubectlOrDie("create", "-f", controllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
By("scaling up the deployment") By("scaling up the deployment")
runKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag) framework.RunKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag)
runKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag) framework.RunKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag)
By("checking up the services") By("checking up the services")
checkAllLogs := func() { checkAllLogs := func() {
forEachPod(c, ns, "name", "redis", func(pod api.Pod) { forEachPod(c, ns, "name", "redis", func(pod api.Pod) {
if pod.Name != bootstrapPodName { if pod.Name != bootstrapPodName {
_, err := lookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}) })
forEachPod(c, ns, "name", "redis-sentinel", func(pod api.Pod) { forEachPod(c, ns, "name", "redis-sentinel", func(pod api.Pod) {
if pod.Name != bootstrapPodName { if pod.Name != bootstrapPodName {
_, err := lookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}) })
@ -100,18 +101,18 @@ var _ = KubeDescribe("[Feature:Example]", func() {
checkAllLogs() checkAllLogs()
By("turning down bootstrap") By("turning down bootstrap")
runKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag) framework.RunKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag)
err = waitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName) err = framework.WaitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("waiting for the new master election") By("waiting for the new master election")
checkAllLogs() checkAllLogs()
}) })
}) })
KubeDescribe("Celery-RabbitMQ", func() { framework.KubeDescribe("Celery-RabbitMQ", func() {
It("should create and stop celery+rabbitmq servers", func() { It("should create and stop celery+rabbitmq servers", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "celery-rabbitmq", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "celery-rabbitmq", file)
} }
rabbitmqServiceYaml := mkpath("rabbitmq-service.yaml") rabbitmqServiceYaml := mkpath("rabbitmq-service.yaml")
rabbitmqControllerYaml := mkpath("rabbitmq-controller.yaml") rabbitmqControllerYaml := mkpath("rabbitmq-controller.yaml")
@ -121,40 +122,40 @@ var _ = KubeDescribe("[Feature:Example]", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting rabbitmq") By("starting rabbitmq")
runKubectlOrDie("create", "-f", rabbitmqServiceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", rabbitmqServiceYaml, nsFlag)
runKubectlOrDie("create", "-f", rabbitmqControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", rabbitmqControllerYaml, nsFlag)
forEachPod(c, ns, "component", "rabbitmq", func(pod api.Pod) { forEachPod(c, ns, "component", "rabbitmq", func(pod api.Pod) {
_, err := lookForStringInLog(ns, pod.Name, "rabbitmq", "Server startup complete", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "rabbitmq", "Server startup complete", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
err := waitForEndpoint(c, ns, "rabbitmq-service") err := framework.WaitForEndpoint(c, ns, "rabbitmq-service")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("starting celery") By("starting celery")
runKubectlOrDie("create", "-f", celeryControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", celeryControllerYaml, nsFlag)
forEachPod(c, ns, "component", "celery", func(pod api.Pod) { forEachPod(c, ns, "component", "celery", func(pod api.Pod) {
_, err := lookForStringInFile(ns, pod.Name, "celery", "/data/celery.log", " ready.", serverStartTimeout) _, err := framework.LookForStringInFile(ns, pod.Name, "celery", "/data/celery.log", " ready.", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
By("starting flower") By("starting flower")
runKubectlOrDie("create", "-f", flowerServiceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", flowerServiceYaml, nsFlag)
runKubectlOrDie("create", "-f", flowerControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", flowerControllerYaml, nsFlag)
forEachPod(c, ns, "component", "flower", func(pod api.Pod) { forEachPod(c, ns, "component", "flower", func(pod api.Pod) {
// Do nothing. Just wait for it to be up and running. // Do nothing. Just wait for it to be up and running.
}) })
content, err := makeHttpRequestToService(c, ns, "flower-service", "/", endpointRegisterTimeout) content, err := makeHttpRequestToService(c, ns, "flower-service", "/", framework.EndpointRegisterTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if !strings.Contains(content, "<title>Celery Flower</title>") { if !strings.Contains(content, "<title>Celery Flower</title>") {
Failf("Flower HTTP request failed") framework.Failf("Flower HTTP request failed")
} }
}) })
}) })
KubeDescribe("Spark", func() { framework.KubeDescribe("Spark", func() {
It("should start spark master, driver and workers", func() { It("should start spark master, driver and workers", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "spark", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file)
} }
// TODO: Add Zeppelin and Web UI to this example. // TODO: Add Zeppelin and Web UI to this example.
@ -165,33 +166,33 @@ var _ = KubeDescribe("[Feature:Example]", func() {
master := func() { master := func() {
By("starting master") By("starting master")
runKubectlOrDie("create", "-f", serviceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
runKubectlOrDie("create", "-f", masterYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
Logf("Now polling for Master startup...") framework.Logf("Now polling for Master startup...")
// Only one master pod, but it's a natural way to look up pod names. // Only one master pod, but it's a natural way to look up pod names.
forEachPod(c, ns, "component", "spark-master", func(pod api.Pod) { forEachPod(c, ns, "component", "spark-master", func(pod api.Pod) {
Logf("Now waiting for master to startup in %v", pod.Name) framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := lookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
By("waiting for master endpoint") By("waiting for master endpoint")
err := waitForEndpoint(c, ns, "spark-master") err := framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
worker := func() { worker := func() {
By("starting workers") By("starting workers")
Logf("Now starting Workers") framework.Logf("Now starting Workers")
runKubectlOrDie("create", "-f", workerControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
// For now, scaling is orthogonal to the core test. // For now, scaling is orthogonal to the core test.
// ScaleRC(c, ns, "spark-worker-controller", 2, true) // framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
Logf("Now polling for worker startup...") framework.Logf("Now polling for worker startup...")
forEachPod(c, ns, "component", "spark-worker", func(pod api.Pod) { forEachPod(c, ns, "component", "spark-worker", func(pod api.Pod) {
_, err := lookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
} }
@ -201,10 +202,10 @@ var _ = KubeDescribe("[Feature:Example]", func() {
}) })
}) })
KubeDescribe("Cassandra", func() { framework.KubeDescribe("Cassandra", func() {
It("should create and scale cassandra", func() { It("should create and scale cassandra", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "cassandra", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "cassandra", file)
} }
serviceYaml := mkpath("cassandra-service.yaml") serviceYaml := mkpath("cassandra-service.yaml")
podYaml := mkpath("cassandra.yaml") podYaml := mkpath("cassandra.yaml")
@ -212,46 +213,46 @@ var _ = KubeDescribe("[Feature:Example]", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("Starting the cassandra service and pod") By("Starting the cassandra service and pod")
runKubectlOrDie("create", "-f", serviceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
runKubectlOrDie("create", "-f", podYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)
Logf("waiting for first cassandra pod") framework.Logf("waiting for first cassandra pod")
err := waitForPodRunningInNamespace(c, "cassandra", ns) err := framework.WaitForPodRunningInNamespace(c, "cassandra", ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("waiting for thrift listener online") framework.Logf("waiting for thrift listener online")
_, err = lookForStringInLog(ns, "cassandra", "cassandra", "Listening for thrift clients", serverStartTimeout) _, err = framework.LookForStringInLog(ns, "cassandra", "cassandra", "Listening for thrift clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("wait for service") framework.Logf("wait for service")
err = waitForEndpoint(c, ns, "cassandra") err = framework.WaitForEndpoint(c, ns, "cassandra")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Create an RC with n nodes in it. Each node will then be verified. // Create an RC with n nodes in it. Each node will then be verified.
By("Creating a Cassandra RC") By("Creating a Cassandra RC")
runKubectlOrDie("create", "-f", controllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) { forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) {
Logf("Verifying pod %v ", pod.Name) framework.Logf("Verifying pod %v ", pod.Name)
_, err = lookForStringInLog(ns, pod.Name, "cassandra", "Listening for thrift clients", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Listening for thrift clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = lookForStringInLog(ns, pod.Name, "cassandra", "Handshaking version", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Handshaking version", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
By("Finding each node in the nodetool status lines") By("Finding each node in the nodetool status lines")
output := runKubectlOrDie("exec", "cassandra", nsFlag, "--", "nodetool", "status") output := framework.RunKubectlOrDie("exec", "cassandra", nsFlag, "--", "nodetool", "status")
forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) { forEachPod(c, ns, "app", "cassandra", func(pod api.Pod) {
if !strings.Contains(output, pod.Status.PodIP) { if !strings.Contains(output, pod.Status.PodIP) {
Failf("Pod ip %s not found in nodetool status", pod.Status.PodIP) framework.Failf("Pod ip %s not found in nodetool status", pod.Status.PodIP)
} }
}) })
}) })
}) })
KubeDescribe("Storm", func() { framework.KubeDescribe("Storm", func() {
It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() { It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "storm", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "storm", file)
} }
zookeeperServiceJson := mkpath("zookeeper-service.json") zookeeperServiceJson := mkpath("zookeeper-service.json")
zookeeperPodJson := mkpath("zookeeper.json") zookeeperPodJson := mkpath("zookeeper.json")
@ -262,28 +263,28 @@ var _ = KubeDescribe("[Feature:Example]", func() {
zookeeperPod := "zookeeper" zookeeperPod := "zookeeper"
By("starting Zookeeper") By("starting Zookeeper")
runKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag) framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag)
runKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag) framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag)
err := waitForPodRunningInNamespace(c, zookeeperPod, ns) err := framework.WaitForPodRunningInNamespace(c, zookeeperPod, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking if zookeeper is up and running") By("checking if zookeeper is up and running")
_, err = lookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout) _, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForEndpoint(c, ns, "zookeeper") err = framework.WaitForEndpoint(c, ns, "zookeeper")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("starting Nimbus") By("starting Nimbus")
runKubectlOrDie("create", "-f", nimbusPodJson, nsFlag) framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag)
runKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag) framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag)
err = waitForPodRunningInNamespace(c, "nimbus", ns) err = framework.WaitForPodRunningInNamespace(c, "nimbus", ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForEndpoint(c, ns, "nimbus") err = framework.WaitForEndpoint(c, ns, "nimbus")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("starting workers") By("starting workers")
runKubectlOrDie("create", "-f", workerControllerJson, nsFlag) framework.RunKubectlOrDie("create", "-f", workerControllerJson, nsFlag)
forEachPod(c, ns, "name", "storm-worker", func(pod api.Pod) { forEachPod(c, ns, "name", "storm-worker", func(pod api.Pod) {
//do nothing, just wait for the pod to be running //do nothing, just wait for the pod to be running
}) })
@ -292,46 +293,46 @@ var _ = KubeDescribe("[Feature:Example]", func() {
time.Sleep(20 * time.Second) time.Sleep(20 * time.Second)
By("checking if there are established connections to Zookeeper") By("checking if there are established connections to Zookeeper")
_, err = lookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout) _, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking if Nimbus responds to requests") By("checking if Nimbus responds to requests")
lookForString("No topologies running.", time.Minute, func() string { framework.LookForString("No topologies running.", time.Minute, func() string {
return runKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list") return framework.RunKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list")
}) })
}) })
}) })
KubeDescribe("Liveness", func() { framework.KubeDescribe("Liveness", func() {
It("liveness pods should be automatically restarted", func() { It("liveness pods should be automatically restarted", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "liveness", file) return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "liveness", file)
} }
execYaml := mkpath("exec-liveness.yaml") execYaml := mkpath("exec-liveness.yaml")
httpYaml := mkpath("http-liveness.yaml") httpYaml := mkpath("http-liveness.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
runKubectlOrDie("create", "-f", execYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", execYaml, nsFlag)
runKubectlOrDie("create", "-f", httpYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", httpYaml, nsFlag)
// Since both containers start rapidly, we can easily run this test in parallel. // Since both containers start rapidly, we can easily run this test in parallel.
var wg sync.WaitGroup var wg sync.WaitGroup
passed := true passed := true
checkRestart := func(podName string, timeout time.Duration) { checkRestart := func(podName string, timeout time.Duration) {
err := waitForPodRunningInNamespace(c, podName, ns) err := framework.WaitForPodRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.Pods(ns).Get(podName) pod, err := c.Pods(ns).Get(podName)
expectNoError(err, fmt.Sprintf("getting pod %s", podName)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
if stat.RestartCount > 0 { if stat.RestartCount > 0 {
Logf("Saw %v restart, succeeded...", podName) framework.Logf("Saw %v restart, succeeded...", podName)
wg.Done() wg.Done()
return return
} }
} }
Logf("Failed waiting for %v restart! ", podName) framework.Logf("Failed waiting for %v restart! ", podName)
passed = false passed = false
wg.Done() wg.Done()
} }
@ -347,15 +348,15 @@ var _ = KubeDescribe("[Feature:Example]", func() {
} }
wg.Wait() wg.Wait()
if !passed { if !passed {
Failf("At least one liveness example failed. See the logs above.") framework.Failf("At least one liveness example failed. See the logs above.")
} }
}) })
}) })
KubeDescribe("Secret", func() { framework.KubeDescribe("Secret", func() {
It("should create a pod that reads a secret", func() { It("should create a pod that reads a secret", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "secrets", file) return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "secrets", file)
} }
secretYaml := mkpath("secret.yaml") secretYaml := mkpath("secret.yaml")
podYaml := mkpath("secret-pod.yaml") podYaml := mkpath("secret-pod.yaml")
@ -363,43 +364,43 @@ var _ = KubeDescribe("[Feature:Example]", func() {
podName := "secret-test-pod" podName := "secret-test-pod"
By("creating secret and pod") By("creating secret and pod")
runKubectlOrDie("create", "-f", secretYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", secretYaml, nsFlag)
runKubectlOrDie("create", "-f", podYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)
err := waitForPodNoLongerRunningInNamespace(c, podName, ns) err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking if secret was read correctly") By("checking if secret was read correctly")
_, err = lookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) _, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
KubeDescribe("Downward API", func() { framework.KubeDescribe("Downward API", func() {
It("should create a pod that prints his name and namespace", func() { It("should create a pod that prints his name and namespace", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "docs", "user-guide", "downward-api", file) return filepath.Join(framework.TestContext.RepoRoot, "docs", "user-guide", "downward-api", file)
} }
podYaml := mkpath("dapi-pod.yaml") podYaml := mkpath("dapi-pod.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
podName := "dapi-test-pod" podName := "dapi-test-pod"
By("creating the pod") By("creating the pod")
runKubectlOrDie("create", "-f", podYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)
err := waitForPodNoLongerRunningInNamespace(c, podName, ns) err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking if name and namespace were passed correctly") By("checking if name and namespace were passed correctly")
_, err = lookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = lookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
KubeDescribe("RethinkDB", func() { framework.KubeDescribe("RethinkDB", func() {
It("should create and stop rethinkdb servers", func() { It("should create and stop rethinkdb servers", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "rethinkdb", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "rethinkdb", file)
} }
driverServiceYaml := mkpath("driver-service.yaml") driverServiceYaml := mkpath("driver-service.yaml")
rethinkDbControllerYaml := mkpath("rc.yaml") rethinkDbControllerYaml := mkpath("rc.yaml")
@ -408,62 +409,62 @@ var _ = KubeDescribe("[Feature:Example]", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting rethinkdb") By("starting rethinkdb")
runKubectlOrDie("create", "-f", driverServiceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", driverServiceYaml, nsFlag)
runKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
checkDbInstances := func() { checkDbInstances := func() {
forEachPod(c, ns, "db", "rethinkdb", func(pod api.Pod) { forEachPod(c, ns, "db", "rethinkdb", func(pod api.Pod) {
_, err := lookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
} }
checkDbInstances() checkDbInstances()
err := waitForEndpoint(c, ns, "rethinkdb-driver") err := framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scaling rethinkdb") By("scaling rethinkdb")
ScaleRC(c, ns, "rethinkdb-rc", 2, true) framework.ScaleRC(c, ns, "rethinkdb-rc", 2, true)
checkDbInstances() checkDbInstances()
By("starting admin") By("starting admin")
runKubectlOrDie("create", "-f", adminServiceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
runKubectlOrDie("create", "-f", adminPodYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
err = waitForPodRunningInNamespace(c, "rethinkdb-admin", ns) err = framework.WaitForPodRunningInNamespace(c, "rethinkdb-admin", ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
checkDbInstances() checkDbInstances()
content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", endpointRegisterTimeout) content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") { if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
Failf("RethinkDB console is not running") framework.Failf("RethinkDB console is not running")
} }
}) })
}) })
KubeDescribe("Hazelcast", func() { framework.KubeDescribe("Hazelcast", func() {
It("should create and scale hazelcast", func() { It("should create and scale hazelcast", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(testContext.RepoRoot, "examples", "hazelcast", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "hazelcast", file)
} }
serviceYaml := mkpath("hazelcast-service.yaml") serviceYaml := mkpath("hazelcast-service.yaml")
controllerYaml := mkpath("hazelcast-controller.yaml") controllerYaml := mkpath("hazelcast-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting hazelcast") By("starting hazelcast")
runKubectlOrDie("create", "-f", serviceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
runKubectlOrDie("create", "-f", controllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) { forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) {
_, err := lookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = lookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
err := waitForEndpoint(c, ns, "hazelcast") err := framework.WaitForEndpoint(c, ns, "hazelcast")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scaling hazelcast") By("scaling hazelcast")
ScaleRC(c, ns, "hazelcast", 2, true) framework.ScaleRC(c, ns, "hazelcast", 2, true)
forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) { forEachPod(c, ns, "name", "hazelcast", func(pod api.Pod) {
_, err := lookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
@ -473,8 +474,8 @@ var _ = KubeDescribe("[Feature:Example]", func() {
func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) { func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) {
var result []byte var result []byte
var err error var err error
for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
proxyRequest, errProxy := getServicesProxyRequest(c, c.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get())
if errProxy != nil { if errProxy != nil {
break break
} }
@ -503,7 +504,7 @@ func prepareResourceWithReplacedString(inputFile, old, new string) string {
func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) { func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) {
pods := []*api.Pod{} pods := []*api.Pod{}
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) selector := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
podList, err := c.Pods(ns).List(options) podList, err := c.Pods(ns).List(options)
@ -518,10 +519,10 @@ func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func
} }
} }
if pods == nil || len(pods) == 0 { if pods == nil || len(pods) == 0 {
Failf("No pods found") framework.Failf("No pods found")
} }
for _, pod := range pods { for _, pod := range pods {
err := waitForPodRunningInNamespace(c, pod.Name, ns) err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
fn(*pod) fn(*pod)
} }
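
For orientation, here is a minimal sketch of how an examples-style test reads against the exported helpers after this rename (framework.KubeDescribe, framework.RunKubectlOrDie, framework.WaitForPodRunningInNamespace, framework.TestContext). The manifest path, pod name and image are illustrative placeholders, not part of this commit:

    package e2e

    import (
    	"fmt"
    	"path/filepath"

    	"k8s.io/kubernetes/test/e2e/framework"

    	. "github.com/onsi/ginkgo"
    	. "github.com/onsi/gomega"
    )

    // Hypothetical example test written against the exported framework helpers.
    var _ = framework.KubeDescribe("[Feature:Example] redis sketch", func() {
    	f := framework.NewDefaultFramework("examples-sketch")

    	It("should start a pod from an example manifest", func() {
    		ns := f.Namespace.Name
    		nsFlag := fmt.Sprintf("--namespace=%v", ns)
    		// "examples/redis/redis-master.yaml" is a placeholder path.
    		podYaml := filepath.Join(framework.TestContext.RepoRoot, "examples", "redis", "redis-master.yaml")

    		By("creating the pod from the example manifest")
    		framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)

    		framework.Logf("waiting for the pod to be running")
    		err := framework.WaitForPodRunningInNamespace(f.Client, "redis-master", ns)
    		Expect(err).NotTo(HaveOccurred())
    	})
    })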


@ -19,14 +19,15 @@ package e2e
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
// These tests exercise the Kubernetes expansion syntax $(VAR). // These tests exercise the Kubernetes expansion syntax $(VAR).
// For more information, see: docs/design/expansion.md // For more information, see: docs/design/expansion.md
var _ = KubeDescribe("Variable Expansion", func() { var _ = framework.KubeDescribe("Variable Expansion", func() {
framework := NewDefaultFramework("var-expansion") f := framework.NewDefaultFramework("var-expansion")
It("should allow composing env vars into new env vars [Conformance]", func() { It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(util.NewUUID()) podName := "var-expansion-" + string(util.NewUUID())
@ -61,7 +62,7 @@ var _ = KubeDescribe("Variable Expansion", func() {
}, },
} }
framework.TestContainerOutput("env composition", pod, 0, []string{ f.TestContainerOutput("env composition", pod, 0, []string{
"FOO=foo-value", "FOO=foo-value",
"BAR=bar-value", "BAR=bar-value",
"FOOBAR=foo-value;;bar-value", "FOOBAR=foo-value;;bar-value",
@ -93,7 +94,7 @@ var _ = KubeDescribe("Variable Expansion", func() {
}, },
} }
framework.TestContainerOutput("substitution in container's command", pod, 0, []string{ f.TestContainerOutput("substitution in container's command", pod, 0, []string{
"test-value", "test-value",
}) })
}) })
@ -124,7 +125,7 @@ var _ = KubeDescribe("Variable Expansion", func() {
}, },
} }
framework.TestContainerOutput("substitution in container's args", pod, 0, []string{ f.TestContainerOutput("substitution in container's args", pod, 0, []string{
"test-value", "test-value",
}) })
}) })
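
One detail worth noting in this hunk: the local handle previously named "framework" has to be renamed to "f", because that identifier now refers to the imported package. A minimal sketch of the resulting shape; the pod name, image and command are illustrative placeholders:

    package e2e

    import (
    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/test/e2e/framework"

    	. "github.com/onsi/ginkgo"
    )

    var _ = framework.KubeDescribe("Sketch: framework handle after the rename", func() {
    	// The handle can no longer be called "framework": that identifier now
    	// names the imported package, so "f" is used instead.
    	f := framework.NewDefaultFramework("handle-rename-sketch")

    	It("runs a pod and checks its output through f", func() {
    		// Pod name, image and command are placeholders for illustration.
    		pod := &api.Pod{
    			ObjectMeta: api.ObjectMeta{Name: "sketch-pod"},
    			Spec: api.PodSpec{
    				Containers: []api.Container{
    					{
    						Name:    "sketch",
    						Image:   "gcr.io/google_containers/busybox",
    						Command: []string{"sh", "-c", "echo hello-from-sketch"},
    					},
    				},
    				RestartPolicy: api.RestartPolicyNever,
    			},
    		}
    		f.TestContainerOutput("echo sketch", pod, 0, []string{"hello-from-sketch"})
    	})
    })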


@ -1,5 +1,5 @@
/* /*
Copyright 2015 The Kubernetes Authors All rights reserved. Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@ -56,15 +56,15 @@ type Framework struct {
// Constraints that are passed to a check which is executed after data is gathered to // Constraints that are passed to a check which is executed after data is gathered to
// see if 99% of results are within acceptable bounds. It has to be injected in the test, // see if 99% of results are within acceptable bounds. It has to be injected in the test,
// as expectations vary greatly. Constraints are grouped by the container names. // as expectations vary greatly. Constraints are grouped by the container names.
addonResourceConstraints map[string]resourceConstraint AddonResourceConstraints map[string]ResourceConstraint
logsSizeWaitGroup sync.WaitGroup logsSizeWaitGroup sync.WaitGroup
logsSizeCloseChannel chan bool logsSizeCloseChannel chan bool
logsSizeVerifier *LogsSizeVerifier logsSizeVerifier *LogsSizeVerifier
// To make sure that this framework cleans up after itself, no matter what, // To make sure that this framework cleans up after itself, no matter what,
// we install a cleanup action before each test and clear it after. If we // we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all cleanup actions. // should abort, the AfterSuite hook should run all Cleanup actions.
cleanupHandle CleanupActionHandle cleanupHandle CleanupActionHandle
// configuration for framework's client // configuration for framework's client
@ -77,16 +77,16 @@ type TestDataSummary interface {
} }
type FrameworkOptions struct { type FrameworkOptions struct {
clientQPS float32 ClientQPS float32
clientBurst int ClientBurst int
} }
// NewFramework makes a new framework and sets up a BeforeEach/AfterEach for // NewFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions). // you (you can write additional before/after each functions).
func NewDefaultFramework(baseName string) *Framework { func NewDefaultFramework(baseName string) *Framework {
options := FrameworkOptions{ options := FrameworkOptions{
clientQPS: 20, ClientQPS: 20,
clientBurst: 50, ClientBurst: 50,
} }
return NewFramework(baseName, options) return NewFramework(baseName, options)
} }
@ -94,27 +94,27 @@ func NewDefaultFramework(baseName string) *Framework {
func NewFramework(baseName string, options FrameworkOptions) *Framework { func NewFramework(baseName string, options FrameworkOptions) *Framework {
f := &Framework{ f := &Framework{
BaseName: baseName, BaseName: baseName,
addonResourceConstraints: make(map[string]resourceConstraint), AddonResourceConstraints: make(map[string]ResourceConstraint),
options: options, options: options,
} }
BeforeEach(f.beforeEach) BeforeEach(f.BeforeEach)
AfterEach(f.afterEach) AfterEach(f.AfterEach)
return f return f
} }
// beforeEach gets a client and makes a namespace. // BeforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() { func (f *Framework) BeforeEach() {
// The fact that we need this feels like a bug in ginkgo. // The fact that we need this feels like a bug in ginkgo.
// https://github.com/onsi/ginkgo/issues/222 // https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.afterEach) f.cleanupHandle = AddCleanupAction(f.AfterEach)
By("Creating a kubernetes client") By("Creating a kubernetes client")
config, err := loadConfig() config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
config.QPS = f.options.clientQPS config.QPS = f.options.ClientQPS
config.Burst = f.options.clientBurst config.Burst = f.options.ClientBurst
c, err := loadClientFromConfig(config) c, err := loadClientFromConfig(config)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -129,15 +129,15 @@ func (f *Framework) beforeEach() {
f.Namespace = namespace f.Namespace = namespace
if testContext.VerifyServiceAccount { if TestContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace") By("Waiting for a default service account to be provisioned in namespace")
err = waitForDefaultServiceAccountInNamespace(c, namespace.Name) err = WaitForDefaultServiceAccountInNamespace(c, namespace.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} else { } else {
Logf("Skipping waiting for service account") Logf("Skipping waiting for service account")
} }
if testContext.GatherKubeSystemResourceUsageData { if TestContext.GatherKubeSystemResourceUsageData {
f.gatherer, err = NewResourceUsageGatherer(c) f.gatherer, err = NewResourceUsageGatherer(c)
if err != nil { if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err) Logf("Error while creating NewResourceUsageGatherer: %v", err)
@ -146,7 +146,7 @@ func (f *Framework) beforeEach() {
} }
} }
if testContext.GatherLogsSizes { if TestContext.GatherLogsSizes {
f.logsSizeWaitGroup = sync.WaitGroup{} f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1) f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool) f.logsSizeCloseChannel = make(chan bool)
@ -158,14 +158,14 @@ func (f *Framework) beforeEach() {
} }
} }
// afterEach deletes the namespace, after reading its events. // AfterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() { func (f *Framework) AfterEach() {
RemoveCleanupAction(f.cleanupHandle) RemoveCleanupAction(f.cleanupHandle)
// DeleteNamespace at the very end in defer, to avoid any // DeleteNamespace at the very end in defer, to avoid any
// expectation failures preventing deleting the namespace. // expectation failures preventing deleting the namespace.
defer func() { defer func() {
if testContext.DeleteNamespace { if TestContext.DeleteNamespace {
for _, ns := range f.namespacesToDelete { for _, ns := range f.namespacesToDelete {
By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
@ -193,23 +193,23 @@ func (f *Framework) afterEach() {
// Print events if the test failed. // Print events if the test failed.
if CurrentGinkgoTestDescription().Failed { if CurrentGinkgoTestDescription().Failed {
dumpAllNamespaceInfo(f.Client, f.Namespace.Name) DumpAllNamespaceInfo(f.Client, f.Namespace.Name)
} }
summaries := make([]TestDataSummary, 0) summaries := make([]TestDataSummary, 0)
if testContext.GatherKubeSystemResourceUsageData && f.gatherer != nil { if TestContext.GatherKubeSystemResourceUsageData && f.gatherer != nil {
By("Collecting resource usage data") By("Collecting resource usage data")
summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.addonResourceConstraints)) summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints))
} }
if testContext.GatherLogsSizes { if TestContext.GatherLogsSizes {
By("Gathering log sizes data") By("Gathering log sizes data")
close(f.logsSizeCloseChannel) close(f.logsSizeCloseChannel)
f.logsSizeWaitGroup.Wait() f.logsSizeWaitGroup.Wait()
summaries = append(summaries, f.logsSizeVerifier.GetSummary()) summaries = append(summaries, f.logsSizeVerifier.GetSummary())
} }
if testContext.GatherMetricsAfterTest { if TestContext.GatherMetricsAfterTest {
By("Gathering metrics") By("Gathering metrics")
// TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered.
grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true) grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true)
@ -225,7 +225,7 @@ func (f *Framework) afterEach() {
} }
} }
outputTypes := strings.Split(testContext.OutputPrintType, ",") outputTypes := strings.Split(TestContext.OutputPrintType, ",")
for _, printType := range outputTypes { for _, printType := range outputTypes {
switch printType { switch printType {
case "hr": case "hr":
@ -246,13 +246,13 @@ func (f *Framework) afterEach() {
// Check whether all nodes are ready after the test. // Check whether all nodes are ready after the test.
// This is explicitly done at the very end of the test, to avoid // This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure. // e.g. not removing namespace in case of this failure.
if err := allNodesReady(f.Client, time.Minute); err != nil { if err := AllNodesReady(f.Client, time.Minute); err != nil {
Failf("All nodes should be ready after test, %v", err) Failf("All nodes should be ready after test, %v", err)
} }
} }
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) { func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) {
createTestingNS := testContext.CreateTestingNS createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil { if createTestingNS == nil {
createTestingNS = CreateTestingNS createTestingNS = CreateTestingNS
} }
@ -270,12 +270,12 @@ func (f *Framework) WaitForPodTerminated(podName, reason string) error {
// WaitForPodRunning waits for the pod to run in the namespace. // WaitForPodRunning waits for the pod to run in the namespace.
func (f *Framework) WaitForPodRunning(podName string) error { func (f *Framework) WaitForPodRunning(podName string) error {
return waitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name) return WaitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name)
} }
// WaitForPodReady waits for the pod to flip to ready in the namespace. // WaitForPodReady waits for the pod to flip to ready in the namespace.
func (f *Framework) WaitForPodReady(podName string) error { func (f *Framework) WaitForPodReady(podName string) error {
return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, podStartTimeout) return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, PodStartTimeout)
} }
// WaitForPodRunningSlow waits for the pod to run in the namespace. // WaitForPodRunningSlow waits for the pod to run in the namespace.
@ -287,12 +287,12 @@ func (f *Framework) WaitForPodRunningSlow(podName string) error {
// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
// success or failure. // success or failure.
func (f *Framework) WaitForPodNoLongerRunning(podName string) error { func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
return waitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name) return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name)
} }
// Runs the given pod and verifies that the output of exact container matches the desired output. // Runs the given pod and verifies that the output of exact container matches the desired output.
func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) {
testContainerOutput(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) TestContainerOutput(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name)
} }
// Runs the given pod and verifies that the output of exact container matches the desired regexps. // Runs the given pod and verifies that the output of exact container matches the desired regexps.
@ -406,7 +406,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
} }
cmdArgs = append(cmdArgs, args...) cmdArgs = append(cmdArgs, args...)
cmd := kubectlCmd(cmdArgs...) cmd := KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args, " ")) Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))


@ -156,7 +156,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
if err != nil { if err != nil {
return nil, err return nil, err
} }
subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -207,7 +207,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
// polls every second, we'd need to get N stats points for N-second interval. // polls every second, we'd need to get N stats points for N-second interval.
// Note that this is an approximation and may not be accurate, hence we also // Note that this is an approximation and may not be accurate, hence we also
// write the actual interval used for calculation (based on the timestamps of // write the actual interval used for calculation (based on the timestamps of
// the stats points in containerResourceUsage.CPUInterval. // the stats points in ContainerResourceUsage.CPUInterval.
// //
// containerNames is a function returning a collection of container names in which // containerNames is a function returning a collection of container names in which
// user is interested in. ExpectMissingContainers is a flag which says if the test // user is interested in. ExpectMissingContainers is a flag which says if the test
@ -222,7 +222,7 @@ func getOneTimeResourceUsageOnNode(
cpuInterval time.Duration, cpuInterval time.Duration,
containerNames func() []string, containerNames func() []string,
expectMissingContainers bool, expectMissingContainers bool,
) (resourceUsagePerContainer, error) { ) (ResourceUsagePerContainer, error) {
const ( const (
// cadvisor records stats about every second. // cadvisor records stats about every second.
cadvisorStatsPollingIntervalInSeconds float64 = 1.0 cadvisorStatsPollingIntervalInSeconds float64 = 1.0
@ -244,8 +244,8 @@ func getOneTimeResourceUsageOnNode(
return nil, err return nil, err
} }
f := func(name string, oldStats, newStats *cadvisorapi.ContainerStats) *containerResourceUsage { f := func(name string, oldStats, newStats *cadvisorapi.ContainerStats) *ContainerResourceUsage {
return &containerResourceUsage{ return &ContainerResourceUsage{
Name: name, Name: name,
Timestamp: newStats.Timestamp, Timestamp: newStats.Timestamp,
CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()), CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
@ -257,7 +257,7 @@ func getOneTimeResourceUsageOnNode(
} }
// Process container infos that are relevant to us. // Process container infos that are relevant to us.
containers := containerNames() containers := containerNames()
usageMap := make(resourceUsagePerContainer, len(containers)) usageMap := make(ResourceUsagePerContainer, len(containers))
for _, name := range containers { for _, name := range containers {
info, ok := containerInfos[name] info, ok := containerInfos[name]
if !ok { if !ok {
@ -274,7 +274,7 @@ func getOneTimeResourceUsageOnNode(
} }
func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) { func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) {
subResourceProxyAvailable, err := serverVersionGTE(subResourceServiceAndNodeProxyVersion, c) subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -331,7 +331,7 @@ const (
) )
// A list of containers for which we want to collect resource usage. // A list of containers for which we want to collect resource usage.
func targetContainers() []string { func TargetContainers() []string {
return []string{ return []string{
rootContainerName, rootContainerName,
stats.SystemContainerRuntime, stats.SystemContainerRuntime,
@ -340,7 +340,7 @@ func targetContainers() []string {
} }
} }
type containerResourceUsage struct { type ContainerResourceUsage struct {
Name string Name string
Timestamp time.Time Timestamp time.Time
CPUUsageInCores float64 CPUUsageInCores float64
@ -351,14 +351,14 @@ type containerResourceUsage struct {
CPUInterval time.Duration CPUInterval time.Duration
} }
func (r *containerResourceUsage) isStrictlyGreaterThan(rhs *containerResourceUsage) bool { func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool {
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
} }
type resourceUsagePerContainer map[string]*containerResourceUsage type ResourceUsagePerContainer map[string]*ContainerResourceUsage
type resourceUsagePerNode map[string]resourceUsagePerContainer type ResourceUsagePerNode map[string]ResourceUsagePerContainer
func formatResourceUsageStats(nodeName string, containerStats resourceUsagePerContainer) string { func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
// Example output: // Example output:
// //
// Resource usage for node "e2e-test-foo-minion-abcde": // Resource usage for node "e2e-test-foo-minion-abcde":
@ -417,7 +417,7 @@ func getKubeletMetricsThroughNode(nodeName string) (string, error) {
return string(body), nil return string(body), nil
} }
func getKubeletHeapStats(c *client.Client, nodeName string) (string, error) { func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap") client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
if err != nil { if err != nil {
return "", err return "", err
@ -448,8 +448,8 @@ func PrintAllKubeletPods(c *client.Client, nodeName string) {
} }
} }
func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *containerResourceUsage { func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *ContainerResourceUsage {
return &containerResourceUsage{ return &ContainerResourceUsage{
Name: name, Name: name,
Timestamp: newStats.CPU.Time.Time, Timestamp: newStats.CPU.Time.Time,
CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()), CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()),
@ -468,13 +468,13 @@ type resourceCollector struct {
node string node string
containers []string containers []string
client *client.Client client *client.Client
buffers map[string][]*containerResourceUsage buffers map[string][]*ContainerResourceUsage
pollingInterval time.Duration pollingInterval time.Duration
stopCh chan struct{} stopCh chan struct{}
} }
func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector { func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
buffers := make(map[string][]*containerResourceUsage) buffers := make(map[string][]*ContainerResourceUsage)
return &resourceCollector{ return &resourceCollector{
node: nodeName, node: nodeName,
containers: containerNames, containers: containerNames,
@ -484,7 +484,7 @@ func newResourceCollector(c *client.Client, nodeName string, containerNames []st
} }
} }
// Start starts a goroutine to poll the node every pollingInterval. // Start starts a goroutine to Poll the node every pollingInterval.
func (r *resourceCollector) Start() { func (r *resourceCollector) Start() {
r.stopCh = make(chan struct{}, 1) r.stopCh = make(chan struct{}, 1)
// Keep the last observed stats for comparison. // Keep the last observed stats for comparison.
@ -527,10 +527,10 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.Container
} }
} }
func (r *resourceCollector) GetLatest() (resourceUsagePerContainer, error) { func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) {
r.lock.RLock() r.lock.RLock()
defer r.lock.RUnlock() defer r.lock.RUnlock()
stats := make(resourceUsagePerContainer) stats := make(ResourceUsagePerContainer)
for _, name := range r.containers { for _, name := range r.containers {
contStats, ok := r.buffers[name] contStats, ok := r.buffers[name]
if !ok || len(contStats) == 0 { if !ok || len(contStats) == 0 {
@ -546,11 +546,11 @@ func (r *resourceCollector) Reset() {
r.lock.Lock() r.lock.Lock()
defer r.lock.Unlock() defer r.lock.Unlock()
for _, name := range r.containers { for _, name := range r.containers {
r.buffers[name] = []*containerResourceUsage{} r.buffers[name] = []*ContainerResourceUsage{}
} }
} }
type resourceUsageByCPU []*containerResourceUsage type resourceUsageByCPU []*ContainerResourceUsage
func (r resourceUsageByCPU) Len() int { return len(r) } func (r resourceUsageByCPU) Len() int { return len(r) }
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
@ -579,27 +579,27 @@ func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]f
return result return result
} }
// resourceMonitor manages a resourceCollector per node. // ResourceMonitor manages a resourceCollector per node.
type resourceMonitor struct { type ResourceMonitor struct {
client *client.Client client *client.Client
containers []string containers []string
pollingInterval time.Duration pollingInterval time.Duration
collectors map[string]*resourceCollector collectors map[string]*resourceCollector
} }
func newResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *resourceMonitor { func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
return &resourceMonitor{ return &ResourceMonitor{
containers: containerNames, containers: containerNames,
client: c, client: c,
pollingInterval: pollingInterval, pollingInterval: pollingInterval,
} }
} }
func (r *resourceMonitor) Start() { func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes // It should be OK to monitor unschedulable Nodes
nodes, err := r.client.Nodes().List(api.ListOptions{}) nodes, err := r.client.Nodes().List(api.ListOptions{})
if err != nil { if err != nil {
Failf("resourceMonitor: unable to get list of nodes: %v", err) Failf("ResourceMonitor: unable to get list of nodes: %v", err)
} }
r.collectors = make(map[string]*resourceCollector, 0) r.collectors = make(map[string]*resourceCollector, 0)
for _, node := range nodes.Items { for _, node := range nodes.Items {
@ -609,19 +609,19 @@ func (r *resourceMonitor) Start() {
} }
} }
func (r *resourceMonitor) Stop() { func (r *ResourceMonitor) Stop() {
for _, collector := range r.collectors { for _, collector := range r.collectors {
collector.Stop() collector.Stop()
} }
} }
func (r *resourceMonitor) Reset() { func (r *ResourceMonitor) Reset() {
for _, collector := range r.collectors { for _, collector := range r.collectors {
collector.Reset() collector.Reset()
} }
} }
func (r *resourceMonitor) LogLatest() { func (r *ResourceMonitor) LogLatest() {
summary, err := r.GetLatest() summary, err := r.GetLatest()
if err != nil { if err != nil {
Logf("%v", err) Logf("%v", err)
@ -629,7 +629,7 @@ func (r *resourceMonitor) LogLatest() {
Logf("%s", r.FormatResourceUsage(summary)) Logf("%s", r.FormatResourceUsage(summary))
} }
func (r *resourceMonitor) FormatResourceUsage(s resourceUsagePerNode) string { func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
summary := []string{} summary := []string{}
for node, usage := range s { for node, usage := range s {
summary = append(summary, formatResourceUsageStats(node, usage)) summary = append(summary, formatResourceUsageStats(node, usage))
@ -637,8 +637,8 @@ func (r *resourceMonitor) FormatResourceUsage(s resourceUsagePerNode) string {
return strings.Join(summary, "\n") return strings.Join(summary, "\n")
} }
func (r *resourceMonitor) GetLatest() (resourceUsagePerNode, error) { func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
result := make(resourceUsagePerNode) result := make(ResourceUsagePerNode)
errs := []error{} errs := []error{}
for key, collector := range r.collectors { for key, collector := range r.collectors {
s, err := collector.GetLatest() s, err := collector.GetLatest()
@ -651,15 +651,15 @@ func (r *resourceMonitor) GetLatest() (resourceUsagePerNode, error) {
return result, utilerrors.NewAggregate(errs) return result, utilerrors.NewAggregate(errs)
} }
// containersCPUSummary is indexed by the container name with each entry a // ContainersCPUSummary is indexed by the container name with each entry a
// (percentile, value) map. // (percentile, value) map.
type containersCPUSummary map[string]map[float64]float64 type ContainersCPUSummary map[string]map[float64]float64
// nodesCPUSummary is indexed by the node name with each entry a // NodesCPUSummary is indexed by the node name with each entry a
// containersCPUSummary map. // ContainersCPUSummary map.
type nodesCPUSummary map[string]containersCPUSummary type NodesCPUSummary map[string]ContainersCPUSummary
func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string { func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
// Example output for a node (the percentiles may differ): // Example output for a node (the percentiles may differ):
// CPU usage of containers on node "e2e-test-foo-minion-0vj7": // CPU usage of containers on node "e2e-test-foo-minion-0vj7":
// container 5th% 50th% 90th% 95th% // container 5th% 50th% 90th% 95th%
@ -677,7 +677,7 @@ func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0) w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t")) fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
for _, containerName := range targetContainers() { for _, containerName := range TargetContainers() {
var s []string var s []string
s = append(s, fmt.Sprintf("%q", containerName)) s = append(s, fmt.Sprintf("%q", containerName))
data, ok := containers[containerName] data, ok := containers[containerName]
@ -696,16 +696,16 @@ func (r *resourceMonitor) FormatCPUSummary(summary nodesCPUSummary) string {
return strings.Join(summaryStrings, "\n") return strings.Join(summaryStrings, "\n")
} }
func (r *resourceMonitor) LogCPUSummary() { func (r *ResourceMonitor) LogCPUSummary() {
summary := r.GetCPUSummary() summary := r.GetCPUSummary()
Logf("%s", r.FormatCPUSummary(summary)) Logf("%s", r.FormatCPUSummary(summary))
} }
func (r *resourceMonitor) GetCPUSummary() nodesCPUSummary { func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
result := make(nodesCPUSummary) result := make(NodesCPUSummary)
for nodeName, collector := range r.collectors { for nodeName, collector := range r.collectors {
result[nodeName] = make(containersCPUSummary) result[nodeName] = make(ContainersCPUSummary)
for _, containerName := range targetContainers() { for _, containerName := range TargetContainers() {
data := collector.GetBasicCPUStats(containerName) data := collector.GetBasicCPUStats(containerName)
result[nodeName][containerName] = data result[nodeName][containerName] = data
} }
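
With the resource-monitoring types exported, a test outside the framework package can drive the monitor directly. A minimal sketch of the flow using only the names introduced above; the 10-second polling interval is an arbitrary illustrative choice:

    package e2e

    import (
    	"time"

    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // logNodeUsageSketch shows the exported ResourceMonitor flow end to end.
    func logNodeUsageSketch(f *framework.Framework) {
    	rm := framework.NewResourceMonitor(f.Client, framework.TargetContainers(), 10*time.Second)
    	rm.Start()
    	defer rm.Stop()

    	// ... run the workload under test here ...

    	rm.LogLatest()     // per-container CPU/memory snapshot for every node
    	rm.LogCPUSummary() // CPU usage percentiles per container, per node

    	usage, err := rm.GetLatest() // framework.ResourceUsagePerNode
    	framework.ExpectNoError(err, "getting latest resource usage")
    	framework.Logf("collected usage for %d node(s)", len(usage))
    }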


@ -101,7 +101,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
} }
func (s *LogsSizeDataSummary) PrintJSON() string { func (s *LogsSizeDataSummary) PrintJSON() string {
return prettyPrintJSON(*s) return PrettyPrintJSON(*s)
} }
type LogsSizeData struct { type LogsSizeData struct {
@ -144,8 +144,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier { func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := NodeSSHHosts(c) nodeAddresses, err := NodeSSHHosts(c)
expectNoError(err) ExpectNoError(err)
masterAddress := getMasterHost() + ":22" masterAddress := GetMasterHost() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1) workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo) workers := make([]*LogSizeGatherer, workersNo)
@ -241,7 +241,7 @@ func (g *LogSizeGatherer) Work() bool {
sshResult, err := SSH( sshResult, err := SSH(
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")), fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip, workItem.ip,
testContext.Provider, TestContext.Provider,
) )
if err != nil { if err != nil {
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err) Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)


@ -91,7 +91,7 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
func (m *MetricsForE2E) PrintJSON() string { func (m *MetricsForE2E) PrintJSON() string {
m.filterMetrics() m.filterMetrics()
return prettyPrintJSON(*m) return PrettyPrintJSON(*m)
} }
var InterestingApiServerMetrics = []string{ var InterestingApiServerMetrics = []string{
@ -287,7 +287,7 @@ func HighLatencyRequests(c *client.Client) (int, error) {
} }
} }
Logf("API calls latencies: %s", prettyPrintJSON(metrics)) Logf("API calls latencies: %s", PrettyPrintJSON(metrics))
return badMetrics, nil return badMetrics, nil
} }
@ -295,7 +295,7 @@ func HighLatencyRequests(c *client.Client) (int, error) {
// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are // Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
// within the threshold. // within the threshold.
func VerifyPodStartupLatency(latency PodStartupLatency) error { func VerifyPodStartupLatency(latency PodStartupLatency) error {
Logf("Pod startup latency: %s", prettyPrintJSON(latency)) Logf("Pod startup latency: %s", PrettyPrintJSON(latency))
if latency.Latency.Perc50 > podStartupThreshold { if latency.Latency.Perc50 > podStartupThreshold {
return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50) return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
@ -310,9 +310,9 @@ func VerifyPodStartupLatency(latency PodStartupLatency) error {
} }
// Resets latency metrics in apiserver. // Resets latency metrics in apiserver.
func resetMetrics(c *client.Client) error { func ResetMetrics(c *client.Client) error {
Logf("Resetting latency metrics in apiserver...") Logf("Resetting latency metrics in apiserver...")
body, err := c.Get().AbsPath("/resetMetrics").DoRaw() body, err := c.Get().AbsPath("/ResetMetrics").DoRaw()
if err != nil { if err != nil {
return err return err
} }
@ -337,7 +337,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Nodes().List(api.ListOptions{})
expectNoError(err) ExpectNoError(err)
var data string var data string
var masterRegistered = false var masterRegistered = false
@ -351,16 +351,16 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
Prefix("proxy"). Prefix("proxy").
Namespace(api.NamespaceSystem). Namespace(api.NamespaceSystem).
Resource("pods"). Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", testContext.CloudConfig.MasterName, ports.SchedulerPort)). Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics"). Suffix("metrics").
Do().Raw() Do().Raw()
expectNoError(err) ExpectNoError(err)
data = string(rawData) data = string(rawData)
} else { } else {
// If master is not registered fall back to old method of using SSH. // If master is not registered fall back to old method of using SSH.
cmd := "curl http://localhost:10251/metrics" cmd := "curl http://localhost:10251/metrics"
sshResult, err := SSH(cmd, getMasterHost()+":22", testContext.Provider) sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 { if err != nil || sshResult.Code != 0 {
return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err) return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
} }
@ -401,13 +401,13 @@ func VerifySchedulerLatency(c *client.Client) error {
if err != nil { if err != nil {
return err return err
} }
Logf("Scheduling latency: %s", prettyPrintJSON(latency)) Logf("Scheduling latency: %s", PrettyPrintJSON(latency))
// TODO: Add some reasonable checks once we know more about the values. // TODO: Add some reasonable checks once we know more about the values.
return nil return nil
} }
func prettyPrintJSON(metrics interface{}) string { func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{} output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil { if err := json.NewEncoder(output).Encode(metrics); err != nil {
Logf("Error building encoder: %v", err) Logf("Error building encoder: %v", err)
@ -446,8 +446,8 @@ func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
} }
} }
// podLatencyData encapsulates pod startup latency information. // PodLatencyData encapsulates pod startup latency information.
type podLatencyData struct { type PodLatencyData struct {
// Name of the pod // Name of the pod
Name string Name string
// Node this pod was running on // Node this pod was running on
@ -456,13 +456,13 @@ type podLatencyData struct {
Latency time.Duration Latency time.Duration
} }
type latencySlice []podLatencyData type LatencySlice []PodLatencyData
func (a latencySlice) Len() int { return len(a) } func (a LatencySlice) Len() int { return len(a) }
func (a latencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a latencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency } func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric { func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
length := len(latencies) length := len(latencies)
perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
@ -470,9 +470,9 @@ func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric {
return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99} return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
} }
// logSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
// If latencyDataLag is nil then it will be populated from latencyData // If latencyDataLag is nil then it will be populated from latencyData
func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLatencyData, nodeCount int, c *client.Client) { func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c *client.Client) {
if latencyDataLag == nil { if latencyDataLag == nil {
latencyDataLag = latencyData latencyDataLag = latencyData
} }
@ -489,15 +489,15 @@ func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLate
// the given time.Duration. Since the arrays are sorted we are looking at the last // the given time.Duration. Since the arrays are sorted we are looking at the last
// element which will always be the highest. If the latency is higher than the max Failf // element which will always be the highest. If the latency is higher than the max Failf
// is called. // is called.
func testMaximumLatencyValue(latencies []podLatencyData, max time.Duration, name string) { func testMaximumLatencyValue(latencies []PodLatencyData, max time.Duration, name string) {
highestLatency := latencies[len(latencies)-1] highestLatency := latencies[len(latencies)-1]
if !(highestLatency.Latency <= max) { if !(highestLatency.Latency <= max) {
Failf("%s were not all under %s: %#v", name, max.String(), latencies) Failf("%s were not all under %s: %#v", name, max.String(), latencies)
} }
} }
func printLatencies(latencies []podLatencyData, header string) { func PrintLatencies(latencies []PodLatencyData, header string) {
metrics := extractLatencyMetrics(latencies) metrics := ExtractLatencyMetrics(latencies)
Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:]) Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99) Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
} }


@ -39,11 +39,11 @@ var prom_registered = false
// Reusable function for pushing metrics to prometheus. Handles initialization and so on. // Reusable function for pushing metrics to prometheus. Handles initialization and so on.
func promPushRunningPending(running, pending int) error { func promPushRunningPending(running, pending int) error {
if testContext.PrometheusPushGateway == "" { if TestContext.PrometheusPushGateway == "" {
return nil return nil
} else { } else {
// Register metrics if necessary // Register metrics if necessary
if !prom_registered && testContext.PrometheusPushGateway != "" { if !prom_registered && TestContext.PrometheusPushGateway != "" {
prometheus.Register(runningMetric) prometheus.Register(runningMetric)
prometheus.Register(pendingMetric) prometheus.Register(pendingMetric)
prom_registered = true prom_registered = true
@ -57,7 +57,7 @@ func promPushRunningPending(running, pending int) error {
if err := prometheus.Push( if err := prometheus.Push(
"e2e", "e2e",
"none", "none",
testContext.PrometheusPushGateway, //i.e. "127.0.0.1:9091" TestContext.PrometheusPushGateway, //i.e. "127.0.0.1:9091"
); err != nil { ); err != nil {
fmt.Println("failed at pushing to pushgateway ", err) fmt.Println("failed at pushing to pushgateway ", err)
return err return err
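promPushRunningPending above guards on an empty gateway address and registers its collectors exactly once. A rough sketch of the same register-once-then-push flow; it assumes the older client_golang API (prometheus.Push(job, instance, addr)) that the vendored copy used here exposes, and the metric names are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	gatewayAddr = "" // e.g. "127.0.0.1:9091"; empty disables pushing
	registered  = false

	runningMetric = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "e2e_running_pods",
		Help: "Number of running pods observed by the e2e run.",
	})
	pendingMetric = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "e2e_pending_pods",
		Help: "Number of pending pods observed by the e2e run.",
	})
)

func pushRunningPending(running, pending int) error {
	if gatewayAddr == "" {
		return nil // pushing disabled
	}
	// Register metrics only on the first call.
	if !registered {
		prometheus.Register(runningMetric)
		prometheus.Register(pendingMetric)
		registered = true
	}
	runningMetric.Set(float64(running))
	pendingMetric.Set(float64(pending))
	// Same call shape as the helper above; newer client_golang releases
	// moved pushing into a separate push subpackage.
	return prometheus.Push("e2e", "none", gatewayAddr)
}

func main() {
	if err := pushRunningPending(3, 1); err != nil {
		fmt.Println("failed at pushing to pushgateway", err)
	}
}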


@ -38,9 +38,9 @@ const (
probeDuration = 15 * time.Second probeDuration = 15 * time.Second
) )
type resourceConstraint struct { type ResourceConstraint struct {
cpuConstraint float64 CPUConstraint float64
memoryConstraint uint64 MemoryConstraint uint64
} }
type SingleContainerSummary struct { type SingleContainerSummary struct {
@ -67,12 +67,12 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
} }
func (s *ResourceUsageSummary) PrintJSON() string { func (s *ResourceUsageSummary) PrintJSON() string {
return prettyPrintJSON(*s) return PrettyPrintJSON(*s)
} }
func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCompute []int) map[int]resourceUsagePerContainer { func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer {
if len(timeSeries) == 0 { if len(timeSeries) == 0 {
return make(map[int]resourceUsagePerContainer) return make(map[int]ResourceUsagePerContainer)
} }
dataMap := make(map[string]*usageDataPerContainer) dataMap := make(map[string]*usageDataPerContainer)
for i := range timeSeries { for i := range timeSeries {
@ -95,12 +95,12 @@ func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCom
sort.Sort(uint64arr(v.memWorkSetData)) sort.Sort(uint64arr(v.memWorkSetData))
} }
result := make(map[int]resourceUsagePerContainer) result := make(map[int]ResourceUsagePerContainer)
for _, perc := range percentilesToCompute { for _, perc := range percentilesToCompute {
data := make(resourceUsagePerContainer) data := make(ResourceUsagePerContainer)
for k, v := range dataMap { for k, v := range dataMap {
percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1 percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1
data[k] = &containerResourceUsage{ data[k] = &ContainerResourceUsage{
Name: k, Name: k,
CPUUsageInCores: v.cpuData[percentileIndex], CPUUsageInCores: v.cpuData[percentileIndex],
MemoryUsageInBytes: v.memUseData[percentileIndex], MemoryUsageInBytes: v.memUseData[percentileIndex],
@ -112,8 +112,8 @@ func computePercentiles(timeSeries []resourceUsagePerContainer, percentilesToCom
return result return result
} }
func leftMergeData(left, right map[int]resourceUsagePerContainer) map[int]resourceUsagePerContainer { func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer {
result := make(map[int]resourceUsagePerContainer) result := make(map[int]ResourceUsagePerContainer)
for percentile, data := range left { for percentile, data := range left {
result[percentile] = data result[percentile] = data
if _, ok := right[percentile]; !ok { if _, ok := right[percentile]; !ok {
@ -133,12 +133,12 @@ type resourceGatherWorker struct {
containerIDToNameMap map[string]string containerIDToNameMap map[string]string
containerIDs []string containerIDs []string
stopCh chan struct{} stopCh chan struct{}
dataSeries []resourceUsagePerContainer dataSeries []ResourceUsagePerContainer
finished bool finished bool
} }
func (w *resourceGatherWorker) singleProbe() { func (w *resourceGatherWorker) singleProbe() {
data := make(resourceUsagePerContainer) data := make(ResourceUsagePerContainer)
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, probeDuration, func() []string { return w.containerIDs }, true) nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, probeDuration, func() []string { return w.containerIDs }, true)
if err != nil { if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err) Logf("Error while reading data from %v: %v", w.nodeName, err)
@ -236,7 +236,7 @@ func (g *containerResourceGatherer) startGatheringData() {
g.getKubeSystemContainersResourceUsage(g.client) g.getKubeSystemContainersResourceUsage(g.client)
} }
func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]resourceConstraint) *ResourceUsageSummary { func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) *ResourceUsageSummary {
close(g.stopCh) close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers)) Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{}) finished := make(chan struct{})
@ -261,7 +261,7 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai
Logf("Warning! Empty percentile list for stopAndPrintData.") Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{} return &ResourceUsageSummary{}
} }
data := make(map[int]resourceUsagePerContainer) data := make(map[int]ResourceUsagePerContainer)
for i := range g.workers { for i := range g.workers {
if g.workers[i].finished { if g.workers[i].finished {
stats := computePercentiles(g.workers[i].dataSeries, percentiles) stats := computePercentiles(g.workers[i].dataSeries, percentiles)
@ -290,23 +290,23 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai
// Name has a form: <pod_name>/<container_name> // Name has a form: <pod_name>/<container_name>
containerName := strings.Split(name, "/")[1] containerName := strings.Split(name, "/")[1]
if constraint, ok := constraints[containerName]; ok { if constraint, ok := constraints[containerName]; ok {
if usage.CPUUsageInCores > constraint.cpuConstraint { if usage.CPUUsageInCores > constraint.CPUConstraint {
violatedConstraints = append( violatedConstraints = append(
violatedConstraints, violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v CPU", fmt.Sprintf("Container %v is using %v/%v CPU",
name, name,
usage.CPUUsageInCores, usage.CPUUsageInCores,
constraint.cpuConstraint, constraint.CPUConstraint,
), ),
) )
} }
if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint { if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
violatedConstraints = append( violatedConstraints = append(
violatedConstraints, violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v MB of memory", fmt.Sprintf("Container %v is using %v/%v MB of memory",
name, name,
float64(usage.MemoryWorkingSetInBytes)/(1024*1024), float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
float64(constraint.memoryConstraint)/(1024*1024), float64(constraint.MemoryConstraint)/(1024*1024),
), ),
) )
} }
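The constraint check in stopAndSummarize reduces to comparing per-container percentile usage against a ResourceConstraint keyed by container name. A self-contained sketch with local stand-ins for the exported types:

package main

import (
	"fmt"
	"strings"
)

type ResourceConstraint struct {
	CPUConstraint    float64 // cores
	MemoryConstraint uint64  // bytes of working set
}

type ContainerResourceUsage struct {
	Name                    string
	CPUUsageInCores         float64
	MemoryWorkingSetInBytes uint64
}

// checkConstraints mirrors the violation loop above: usage is keyed by
// "<pod_name>/<container_name>", constraints by container name only.
func checkConstraints(usage map[string]*ContainerResourceUsage, constraints map[string]ResourceConstraint) []string {
	var violated []string
	for name, u := range usage {
		containerName := strings.Split(name, "/")[1]
		c, ok := constraints[containerName]
		if !ok {
			continue
		}
		if u.CPUUsageInCores > c.CPUConstraint {
			violated = append(violated, fmt.Sprintf("Container %v is using %v/%v CPU",
				name, u.CPUUsageInCores, c.CPUConstraint))
		}
		if u.MemoryWorkingSetInBytes > c.MemoryConstraint {
			violated = append(violated, fmt.Sprintf("Container %v is using %v/%v MB of memory",
				name, float64(u.MemoryWorkingSetInBytes)/(1024*1024), float64(c.MemoryConstraint)/(1024*1024)))
		}
	}
	return violated
}

func main() {
	usage := map[string]*ContainerResourceUsage{
		"kube-system/kube-dns": {Name: "kube-system/kube-dns", CPUUsageInCores: 0.3, MemoryWorkingSetInBytes: 200 << 20},
	}
	constraints := map[string]ResourceConstraint{
		"kube-dns": {CPUConstraint: 0.2, MemoryConstraint: 100 << 20},
	}
	for _, v := range checkConstraints(usage, constraints) {
		fmt.Println(v)
	}
}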


@ -98,7 +98,7 @@ func RegisterFlags() {
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.") flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.") flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flag.StringVar(&testContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.") flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.") flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
flag.StringVar(&TestContext.OSDistro, "os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).") flag.StringVar(&TestContext.OSDistro, "os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")

File diff suppressed because it is too large


@ -25,13 +25,14 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
) )
// This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager // This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager
// //
// Slow by design (7 min) // Slow by design (7 min)
var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func() { var _ = framework.KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func() {
f := NewDefaultFramework("garbage-collector") f := framework.NewDefaultFramework("garbage-collector")
It("should handle the creation of 1000 pods", func() { It("should handle the creation of 1000 pods", func() {
var count int var count int
for count < 1000 { for count < 1000 {
@ -40,16 +41,16 @@ var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func
pod.Status.Phase = api.PodFailed pod.Status.Phase = api.PodFailed
pod, err = f.Client.Pods(f.Namespace.Name).UpdateStatus(pod) pod, err = f.Client.Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil { if err != nil {
Failf("err failing pod: %v", err) framework.Failf("err failing pod: %v", err)
} }
count++ count++
if count%50 == 0 { if count%50 == 0 {
Logf("count: %v", count) framework.Logf("count: %v", count)
} }
} }
Logf("created: %v", count) framework.Logf("created: %v", count)
// The gc controller polls every 30s and fires off a goroutine per // The gc controller polls every 30s and fires off a goroutine per
// pod to terminate. // pod to terminate.
@ -62,22 +63,22 @@ var _ = KubeDescribe("Garbage collector [Feature:GarbageCollector] [Slow]", func
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}) pods, err = f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
if err != nil { if err != nil {
Logf("Failed to list pod %v", err) framework.Logf("Failed to list pod %v", err)
return false, nil return false, nil
} }
if len(pods.Items) != gcThreshold { if len(pods.Items) != gcThreshold {
Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold) framework.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
if pollErr != nil { if pollErr != nil {
Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err)
} }
}) })
}) })
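The cleanup check above is built on wait.Poll: retry a condition at a fixed interval until it reports done, returns an error, or the timeout expires. A simplified stand-in for that contract (the real helper lives in pkg/util/wait and waits one interval before the first check):

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a minimal stand-in for wait.Poll: run condition every interval
// until it returns (true, nil), a non-nil error, or timeout elapses.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	remaining := 250
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		remaining -= 50 // pretend the GC controller deleted another batch
		fmt.Println("pods remaining:", remaining)
		return remaining <= 100, nil
	})
	fmt.Println("poll result:", err)
}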
func createTerminatingPod(f *Framework) (*api.Pod, error) { func createTerminatingPod(f *framework.Framework) (*api.Pod, error) {
uuid := util.NewUUID() uuid := util.NewUUID()
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{


@ -27,15 +27,16 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Generated release_1_2 clientset", func() { var _ = framework.KubeDescribe("Generated release_1_2 clientset", func() {
framework := NewDefaultFramework("clientset") f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() { It("should create pods, delete pods, watch pods", func() {
podClient := framework.Clientset_1_2.Core().Pods(framework.Namespace.Name) podClient := f.Clientset_1_2.Core().Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod" + string(util.NewUUID()) name := "pod" + string(util.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
@ -72,7 +73,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() {
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
options = api.ListOptions{ options = api.ListOptions{
@ -81,7 +82,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() {
} }
w, err := podClient.Watch(options) w, err := podClient.Watch(options)
if err != nil { if err != nil {
Failf("Failed to set up watch: %v", err) framework.Failf("Failed to set up watch: %v", err)
} }
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
@ -91,7 +92,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() {
defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
pod, err = podClient.Create(pod) pod, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
@ -102,7 +103,7 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() {
} }
pods, err = podClient.List(options) pods, err = podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -110,19 +111,19 @@ var _ = KubeDescribe("Generated release_1_2 clientset", func() {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type != watch.Added { if event.Type != watch.Added {
Failf("Failed to observe pod creation: %v", event) framework.Failf("Failed to observe pod creation: %v", event)
} }
case <-time.After(podStartTimeout): case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation") Fail("Timeout while waiting for pod creation")
} }
// We need to wait for the pod to be scheduled, otherwise the deletion // We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully. // will be carried out immediately rather than gracefully.
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("deleting the pod gracefully") By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil { if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil {
Failf("Failed to delete pod: %v", err) framework.Failf("Failed to delete pod: %v", err)
} }
By("verifying pod deletion was observed") By("verifying pod deletion was observed")


@ -24,6 +24,8 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/test/e2e/framework"
) )
// TODO: These should really just use the GCE API client library or at least use // TODO: These should really just use the GCE API client library or at least use
@ -36,12 +38,12 @@ func createGCEStaticIP(name string) (string, error) {
// NAME REGION ADDRESS STATUS // NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED // test-static-ip us-central1 104.197.143.7 RESERVED
glog.Infof("Creating static IP with name %q in project %q", name, testContext.CloudConfig.ProjectID) glog.Infof("Creating static IP with name %q in project %q", name, framework.TestContext.CloudConfig.ProjectID)
var outputBytes []byte var outputBytes []byte
var err error var err error
for attempts := 0; attempts < 4; attempts++ { for attempts := 0; attempts < 4; attempts++ {
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create", outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
name, "--project", testContext.CloudConfig.ProjectID, name, "--project", framework.TestContext.CloudConfig.ProjectID,
"--region", "us-central1", "-q").CombinedOutput() "--region", "us-central1", "-q").CombinedOutput()
if err == nil { if err == nil {
break break
@ -76,7 +78,7 @@ func deleteGCEStaticIP(name string) error {
// test-static-ip us-central1 104.197.143.7 RESERVED // test-static-ip us-central1 104.197.143.7 RESERVED
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete", outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
name, "--project", testContext.CloudConfig.ProjectID, name, "--project", framework.TestContext.CloudConfig.ProjectID,
"--region", "us-central1", "-q").CombinedOutput() "--region", "us-central1", "-q").CombinedOutput()
if err != nil { if err != nil {
// Ditch the error, since the stderr in the output is what actually contains // Ditch the error, since the stderr in the output is what actually contains
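Both helpers shell out to gcloud via exec.Command, retrying the create a few times and keeping the combined output for diagnostics. A hedged sketch of that pattern; the project and region values are placeholders:

package main

import (
	"fmt"
	"os/exec"
)

// createStaticIP retries `gcloud compute addresses create` a few times and
// returns the combined stdout/stderr of the last attempt.
func createStaticIP(name, project, region string) (string, error) {
	var out []byte
	var err error
	for attempt := 0; attempt < 4; attempt++ {
		out, err = exec.Command("gcloud", "compute", "addresses", "create",
			name, "--project", project, "--region", region, "-q").CombinedOutput()
		if err == nil {
			break
		}
	}
	return string(out), err
}

func main() {
	// Placeholder values; a real run needs gcloud configured for the project.
	out, err := createStaticIP("test-static-ip", "my-project", "us-central1")
	fmt.Println(out, err)
}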


@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
@ -34,15 +35,15 @@ const (
// These tests don't seem to be running properly in parallel: issue: #20338. // These tests don't seem to be running properly in parallel: issue: #20338.
// //
var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() { var _ = framework.KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *ResourceConsumer var rc *ResourceConsumer
f := NewDefaultFramework("horizontal-pod-autoscaling") f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability" titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability" titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability"
// These tests take ~20 minutes each. // These tests take ~20 minutes each.
KubeDescribe("[Serial] [Slow] Deployment", func() { framework.KubeDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments // CPU tests via deployments
It(titleUp, func() { It(titleUp, func() {
scaleUp("test-deployment", kindDeployment, rc, f) scaleUp("test-deployment", kindDeployment, rc, f)
@ -53,7 +54,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func()
}) })
// These tests take ~20 minutes each. // These tests take ~20 minutes each.
KubeDescribe("[Serial] [Slow] ReplicaSet", func() { framework.KubeDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via deployments // CPU tests via deployments
It(titleUp, func() { It(titleUp, func() {
scaleUp("rs", kindReplicaSet, rc, f) scaleUp("rs", kindReplicaSet, rc, f)
@ -63,7 +64,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func()
}) })
}) })
// These tests take ~20 minutes each. // These tests take ~20 minutes each.
KubeDescribe("[Serial] [Slow] ReplicationController", func() { framework.KubeDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers // CPU tests via replication controllers
It(titleUp, func() { It(titleUp, func() {
scaleUp("rc", kindRC, rc, f) scaleUp("rc", kindRC, rc, f)
@ -73,7 +74,7 @@ var _ = KubeDescribe("Horizontal pod autoscaling (scale resource: CPU)", func()
}) })
}) })
KubeDescribe("ReplicationController light", func() { framework.KubeDescribe("ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() { It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{ scaleTest := &HPAScaleTest{
initPods: 1, initPods: 1,
@ -123,7 +124,7 @@ type HPAScaleTest struct {
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts. // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to. // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) { func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *framework.Framework) {
rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 100, f) rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 100, f)
defer rc.CleanUp() defer rc.CleanUp()
createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods, scaleTest.useV1) createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods, scaleTest.useV1)
@ -137,7 +138,7 @@ func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *F
} }
} }
func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) { func scaleUp(name, kind string, rc *ResourceConsumer, f *framework.Framework) {
scaleTest := &HPAScaleTest{ scaleTest := &HPAScaleTest{
initPods: 1, initPods: 1,
totalInitialCPUUsage: 250, totalInitialCPUUsage: 250,
@ -153,7 +154,7 @@ func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
scaleTest.run(name, kind, rc, f) scaleTest.run(name, kind, rc, f)
} }
func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) { func scaleDown(name, kind string, rc *ResourceConsumer, f *framework.Framework) {
scaleTest := &HPAScaleTest{ scaleTest := &HPAScaleTest{
initPods: 5, initPods: 5,
totalInitialCPUUsage: 400, totalInitialCPUUsage: 400,
@ -192,5 +193,5 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
} else { } else {
_, errHPA = rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) _, errHPA = rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
} }
expectNoError(errHPA) framework.ExpectNoError(errHPA)
} }
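These cases drive CPU load and wait for the autoscaler to settle on the pod counts in the titles. As a rough illustration of why the steps look like 1→3 and 3→5, the usual HPA rule of thumb is desired = ceil(current × currentUtilization / targetUtilization); this is an approximation for intuition, not the controller's exact code:

package main

import (
	"fmt"
	"math"
)

// desiredReplicas applies the common HPA rule of thumb: scale proportionally
// to how far current CPU utilization is from the target utilization.
func desiredReplicas(current int, currentUtilization, targetUtilization float64) int {
	if targetUtilization <= 0 || current <= 0 {
		return current
	}
	return int(math.Ceil(float64(current) * currentUtilization / targetUtilization))
}

func main() {
	// e.g. 1 pod running at ~150% of the target utilization scales to 3 pods;
	// in practice the result is clamped to the minPods/maxPods bounds.
	fmt.Println(desiredReplicas(1, 150, 50)) // 3
	fmt.Println(desiredReplicas(3, 100, 50)) // 6 before clamping
}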


@ -25,20 +25,21 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
//TODO : Consolidate this code with the code for emptyDir. //TODO : Consolidate this code with the code for emptyDir.
//This will require some smarts. //This will require some smarts.
var _ = KubeDescribe("hostPath", func() { var _ = framework.KubeDescribe("hostPath", func() {
framework := NewDefaultFramework("hostpath") f := framework.NewDefaultFramework("hostpath")
var c *client.Client var c *client.Client
var namespace *api.Namespace var namespace *api.Namespace
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
namespace = framework.Namespace namespace = f.Namespace
//cleanup before running the test. //cleanup before running the test.
_ = os.Remove("/tmp/test-file") _ = os.Remove("/tmp/test-file")
@ -55,7 +56,7 @@ var _ = KubeDescribe("hostPath", func() {
fmt.Sprintf("--fs_type=%v", volumePath), fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath), fmt.Sprintf("--file_mode=%v", volumePath),
} }
testContainerOutput("hostPath mode", c, pod, 0, []string{ framework.TestContainerOutput("hostPath mode", c, pod, 0, []string{
"mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir "mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
}, },
namespace.Name) namespace.Name)
@ -82,7 +83,7 @@ var _ = KubeDescribe("hostPath", func() {
} }
//Read the content of the file with the second container to //Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod. //verify volumes being shared properly among containers within the pod.
testContainerOutput("hostPath r/w", c, pod, 1, []string{ framework.TestContainerOutput("hostPath r/w", c, pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
}, namespace.Name, }, namespace.Name,
) )


@ -34,6 +34,7 @@ import (
utilexec "k8s.io/kubernetes/pkg/util/exec" utilexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -79,7 +80,7 @@ var (
verifyHTTPGET = true verifyHTTPGET = true
// On average it takes ~6 minutes for a single backend to come online. // On average it takes ~6 minutes for a single backend to come online.
// We *don't* expect this poll to consistently take 15 minutes for every // We *don't* expect this poll to consistently take 15 minutes for every
// Ingress as GCE is creating/checking backends in parallel, but at the // Ingress as GCE is creating/checking backends in parallel, but at the
// same time, we're not testing GCE startup latency. So give it enough // same time, we're not testing GCE startup latency. So give it enough
// time, and fail if the average is too high. // time, and fail if the average is too high.
@ -181,13 +182,13 @@ func createApp(c *client.Client, ns string, i int) {
name := fmt.Sprintf("%v%d", appPrefix, i) name := fmt.Sprintf("%v%d", appPrefix, i)
l := map[string]string{} l := map[string]string{}
Logf("Creating svc %v", name) framework.Logf("Creating svc %v", name)
svc := svcByName(name, httpContainerPort) svc := svcByName(name, httpContainerPort)
svc.Spec.Type = api.ServiceTypeNodePort svc.Spec.Type = api.ServiceTypeNodePort
_, err := c.Services(ns).Create(svc) _, err := c.Services(ns).Create(svc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("Creating rc %v", name) framework.Logf("Creating rc %v", name)
rc := rcByNamePort(name, 1, testImage, httpContainerPort, api.ProtocolTCP, l) rc := rcByNamePort(name, 1, testImage, httpContainerPort, api.ProtocolTCP, l)
rc.Spec.Template.Spec.Containers[0].Args = []string{ rc.Spec.Template.Spec.Containers[0].Args = []string{
"--num=1", "--num=1",
@ -215,19 +216,19 @@ func gcloudUnmarshal(resource, regex, project string, out interface{}) {
if exitErr, ok := err.(utilexec.ExitError); ok { if exitErr, ok := err.(utilexec.ExitError); ok {
errCode = exitErr.ExitStatus() errCode = exitErr.ExitStatus()
} }
Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode) framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d", strings.Join(command, " "), err, string(output), errCode)
} }
if err := json.Unmarshal([]byte(output), out); err != nil { if err := json.Unmarshal([]byte(output), out); err != nil {
Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output)) framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
} }
} }
func gcloudDelete(resource, name, project string) { func gcloudDelete(resource, name, project string) {
Logf("Deleting %v: %v", resource, name) framework.Logf("Deleting %v: %v", resource, name)
output, err := exec.Command("gcloud", "compute", resource, "delete", output, err := exec.Command("gcloud", "compute", resource, "delete",
name, fmt.Sprintf("--project=%v", project), "-q").CombinedOutput() name, fmt.Sprintf("--project=%v", project), "-q").CombinedOutput()
if err != nil { if err != nil {
Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err) framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
} }
} }
@ -237,17 +238,17 @@ func kubectlLogLBController(c *client.Client, ns string) {
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
podList, err := c.Pods(api.NamespaceAll).List(options) podList, err := c.Pods(api.NamespaceAll).List(options)
if err != nil { if err != nil {
Logf("Cannot log L7 controller output, error listing pods %v", err) framework.Logf("Cannot log L7 controller output, error listing pods %v", err)
return return
} }
if len(podList.Items) == 0 { if len(podList.Items) == 0 {
Logf("Loadbalancer controller pod not found") framework.Logf("Loadbalancer controller pod not found")
return return
} }
for _, p := range podList.Items { for _, p := range podList.Items {
Logf("\nLast 100 log lines of %v\n", p.Name) framework.Logf("\nLast 100 log lines of %v\n", p.Name)
l, _ := runKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100") l, _ := framework.RunKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100")
Logf(l) framework.Logf(l)
} }
} }
@ -270,7 +271,7 @@ func (cont *IngressController) create() {
// for issues like #16337. Currently, all names should fall within 63 chars. // for issues like #16337. Currently, all names should fall within 63 chars.
testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID) testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.ns, cont.UID)
if len(testName) > nameLenLimit { if len(testName) > nameLenLimit {
Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v", framework.Failf("Cannot reliably test the given namespace(%v)/uid(%v), too close to GCE limit of %v",
cont.ns, cont.UID, nameLenLimit) cont.ns, cont.UID, nameLenLimit)
} }
@ -289,7 +290,7 @@ func (cont *IngressController) create() {
existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts) existingRCs, err := cont.c.ReplicationControllers(api.NamespaceSystem).List(listOpts)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(existingRCs.Items) != 1 { if len(existingRCs.Items) != 1 {
Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels) framework.Failf("Unexpected number of lb cluster addons %v with label %v in kube-system namespace", len(existingRCs.Items), clusterAddonLBLabels)
} }
// Merge the existing spec and new spec. The modifications should not // Merge the existing spec and new spec. The modifications should not
@ -321,7 +322,7 @@ func (cont *IngressController) create() {
cont.rc = rc cont.rc = rc
_, err = cont.c.ReplicationControllers(cont.ns).Create(cont.rc) _, err = cont.c.ReplicationControllers(cont.ns).Create(cont.rc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(waitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred()) Expect(framework.WaitForRCPodsRunning(cont.c, cont.ns, cont.rc.Name)).NotTo(HaveOccurred())
} }
func (cont *IngressController) Cleanup(del bool) error { func (cont *IngressController) Cleanup(del bool) error {
@ -336,11 +337,11 @@ func (cont *IngressController) Cleanup(del bool) error {
for _, f := range fwList { for _, f := range fwList {
msg += fmt.Sprintf("%v\n", f.Name) msg += fmt.Sprintf("%v\n", f.Name)
if del { if del {
Logf("Deleting forwarding-rule: %v", f.Name) framework.Logf("Deleting forwarding-rule: %v", f.Name)
output, err := exec.Command("gcloud", "compute", "forwarding-rules", "delete", output, err := exec.Command("gcloud", "compute", "forwarding-rules", "delete",
f.Name, fmt.Sprintf("--project=%v", cont.Project), "-q", "--global").CombinedOutput() f.Name, fmt.Sprintf("--project=%v", cont.Project), "-q", "--global").CombinedOutput()
if err != nil { if err != nil {
Logf("Error deleting forwarding rules, output: %v\nerror:%v", string(output), err) framework.Logf("Error deleting forwarding rules, output: %v\nerror:%v", string(output), err)
} }
} }
} }
@ -440,7 +441,7 @@ func (cont *IngressController) Cleanup(del bool) error {
// test requires at least 5. // test requires at least 5.
// //
// Slow by design (10 min) // Slow by design (10 min)
var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() { var _ = framework.KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func() {
// These variables are initialized after framework's beforeEach. // These variables are initialized after framework's beforeEach.
var ns string var ns string
var addonDir string var addonDir string
@ -448,18 +449,18 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
var responseTimes, creationTimes []time.Duration var responseTimes, creationTimes []time.Duration
var ingController *IngressController var ingController *IngressController
framework := Framework{BaseName: "glbc"} f := framework.Framework{BaseName: "glbc"}
BeforeEach(func() { BeforeEach(func() {
// This test requires a GCE/GKE only cluster-addon // This test requires a GCE/GKE only cluster-addon
SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
framework.beforeEach() f.BeforeEach()
client = framework.Client client = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
// Scaled down the existing Ingress controller so it doesn't interfere with the test. // Scaled down the existing Ingress controller so it doesn't interfere with the test.
Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred()) Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 0)).NotTo(HaveOccurred())
addonDir = filepath.Join( addonDir = filepath.Join(
testContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc") framework.TestContext.RepoRoot, "cluster", "addons", "cluster-loadbalancing", "glbc")
nsParts := strings.Split(ns, "-") nsParts := strings.Split(ns, "-")
ingController = &IngressController{ ingController = &IngressController{
@ -467,13 +468,13 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
// The UID in the namespace was generated by the master, so it's // The UID in the namespace was generated by the master, so it's
// global to the cluster. // global to the cluster.
UID: nsParts[len(nsParts)-1], UID: nsParts[len(nsParts)-1],
Project: testContext.CloudConfig.ProjectID, Project: framework.TestContext.CloudConfig.ProjectID,
rcPath: filepath.Join(addonDir, "glbc-controller.yaml"), rcPath: filepath.Join(addonDir, "glbc-controller.yaml"),
defaultSvcPath: filepath.Join(addonDir, "default-svc.yaml"), defaultSvcPath: filepath.Join(addonDir, "default-svc.yaml"),
c: client, c: client,
} }
ingController.create() ingController.create()
Logf("Finished creating ingress controller") framework.Logf("Finished creating ingress controller")
// If we somehow get the same namespace uid as someone else in this // If we somehow get the same namespace uid as someone else in this
// gce project, just back off. // gce project, just back off.
Expect(ingController.Cleanup(false)).NotTo(HaveOccurred()) Expect(ingController.Cleanup(false)).NotTo(HaveOccurred())
@ -482,47 +483,47 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
}) })
AfterEach(func() { AfterEach(func() {
Logf("Average creation time %+v, health check time %+v", creationTimes, responseTimes) framework.Logf("Average creation time %+v, health check time %+v", creationTimes, responseTimes)
if CurrentGinkgoTestDescription().Failed { if CurrentGinkgoTestDescription().Failed {
kubectlLogLBController(client, ns) kubectlLogLBController(client, ns)
Logf("\nOutput of kubectl describe ing:\n") framework.Logf("\nOutput of kubectl describe ing:\n")
desc, _ := runKubectl("describe", "ing", fmt.Sprintf("--namespace=%v", ns)) desc, _ := framework.RunKubectl("describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc) framework.Logf(desc)
} }
// Delete all Ingress, then wait for the controller to cleanup. // Delete all Ingress, then wait for the controller to cleanup.
ings, err := client.Extensions().Ingress(ns).List(api.ListOptions{}) ings, err := client.Extensions().Ingress(ns).List(api.ListOptions{})
if err != nil { if err != nil {
Logf("WARNING: Failed to list ingress: %+v", err) framework.Logf("WARNING: Failed to list ingress: %+v", err)
} else { } else {
for _, ing := range ings.Items { for _, ing := range ings.Items {
Logf("Deleting ingress %v/%v", ing.Namespace, ing.Name) framework.Logf("Deleting ingress %v/%v", ing.Namespace, ing.Name)
if err := client.Extensions().Ingress(ns).Delete(ing.Name, nil); err != nil { if err := client.Extensions().Ingress(ns).Delete(ing.Name, nil); err != nil {
Logf("WARNING: Failed to delete ingress %v: %v", ing.Name, err) framework.Logf("WARNING: Failed to delete ingress %v: %v", ing.Name, err)
} }
} }
} }
pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) {
if err := ingController.Cleanup(false); err != nil { if err := ingController.Cleanup(false); err != nil {
Logf("Still waiting for glbc to cleanup: %v", err) framework.Logf("Still waiting for glbc to cleanup: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
// TODO: Remove this once issue #17802 is fixed // TODO: Remove this once issue #17802 is fixed
Expect(scaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred()) Expect(framework.ScaleRCByLabels(client, ingController.rc.Namespace, ingController.rc.Labels, 0)).NotTo(HaveOccurred())
// If the controller failed to cleanup the test will fail, but we want to cleanup // If the controller failed to cleanup the test will fail, but we want to cleanup
// resources before that. // resources before that.
if pollErr != nil { if pollErr != nil {
if cleanupErr := ingController.Cleanup(true); cleanupErr != nil { if cleanupErr := ingController.Cleanup(true); cleanupErr != nil {
Logf("WARNING: Failed to cleanup resources %v", cleanupErr) framework.Logf("WARNING: Failed to cleanup resources %v", cleanupErr)
} }
Failf("Failed to cleanup GCE L7 resources.") framework.Failf("Failed to cleanup GCE L7 resources.")
} }
// Restore the cluster Addon. // Restore the cluster Addon.
Expect(scaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred()) Expect(framework.ScaleRCByLabels(client, api.NamespaceSystem, clusterAddonLBLabels, 1)).NotTo(HaveOccurred())
framework.afterEach() f.AfterEach()
Logf("Successfully verified GCE L7 loadbalancer via Ingress.") framework.Logf("Successfully verified GCE L7 loadbalancer via Ingress.")
}) })
It("should create GCE L7 loadbalancers and verify Ingress", func() { It("should create GCE L7 loadbalancers and verify Ingress", func() {
@ -536,9 +537,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
// foo0.bar.com: /foo0 // foo0.bar.com: /foo0
// foo1.bar.com: /foo1 // foo1.bar.com: /foo1
if numApps < numIng { if numApps < numIng {
Failf("Need more apps than Ingress") framework.Failf("Need more apps than Ingress")
} }
Logf("Starting ingress test") framework.Logf("Starting ingress test")
appsPerIngress := numApps / numIng appsPerIngress := numApps / numIng
By(fmt.Sprintf("Creating %d rcs + svc, and %d apps per Ingress", numApps, appsPerIngress)) By(fmt.Sprintf("Creating %d rcs + svc, and %d apps per Ingress", numApps, appsPerIngress))
@ -569,9 +570,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
for _, ing := range ings.Items { for _, ing := range ings.Items {
// Wait for the loadbalancer IP. // Wait for the loadbalancer IP.
start := time.Now() start := time.Now()
address, err := waitForIngressAddress(client, ing.Namespace, ing.Name, lbPollTimeout) address, err := framework.WaitForIngressAddress(client, ing.Namespace, ing.Name, lbPollTimeout)
if err != nil { if err != nil {
Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout) framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Found address %v for ingress %v, took %v to come online", By(fmt.Sprintf("Found address %v for ingress %v, took %v to come online",
@ -592,9 +593,9 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
for _, p := range rules.IngressRuleValue.HTTP.Paths { for _, p := range rules.IngressRuleValue.HTTP.Paths {
route := fmt.Sprintf("https://%v%v", address, p.Path) route := fmt.Sprintf("https://%v%v", address, p.Path)
Logf("Testing route %v host %v with simple GET", route, rules.Host) framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
if err != nil { if err != nil {
Failf("Unable to create transport: %v", err) framework.Failf("Unable to create transport: %v", err)
} }
// Make sure the service node port is reachable // Make sure the service node port is reachable
Expect(curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))).NotTo(HaveOccurred()) Expect(curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))).NotTo(HaveOccurred())
@ -605,7 +606,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
var err error var err error
lastBody, err = simpleGET(timeoutClient, route, rules.Host) lastBody, err = simpleGET(timeoutClient, route, rules.Host)
if err != nil { if err != nil {
Logf("host %v path %v: %v", rules.Host, route, err) framework.Logf("host %v path %v: %v", rules.Host, route, err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -618,7 +619,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
if err := curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil { if err := curlServiceNodePort(client, ns, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil {
msg += fmt.Sprintf("Also unable to curl service node port: %v", err) msg += fmt.Sprintf("Also unable to curl service node port: %v", err)
} }
Failf(msg) framework.Failf(msg)
} }
rt := time.Since(GETStart) rt := time.Since(GETStart)
By(fmt.Sprintf("Route %v host %v took %v to respond", route, rules.Host, rt)) By(fmt.Sprintf("Route %v host %v took %v to respond", route, rules.Host, rt))
@ -632,7 +633,7 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
sort.Sort(timeSlice(creationTimes)) sort.Sort(timeSlice(creationTimes))
perc50 := creationTimes[len(creationTimes)/2] perc50 := creationTimes[len(creationTimes)/2]
if perc50 > expectedLBCreationTime { if perc50 > expectedLBCreationTime {
Logf("WARNING: Average creation time is too high: %+v", creationTimes) framework.Logf("WARNING: Average creation time is too high: %+v", creationTimes)
} }
if !verifyHTTPGET { if !verifyHTTPGET {
return return
@ -640,14 +641,14 @@ var _ = KubeDescribe("GCE L7 LoadBalancer Controller [Feature:Ingress]", func()
sort.Sort(timeSlice(responseTimes)) sort.Sort(timeSlice(responseTimes))
perc50 = responseTimes[len(responseTimes)/2] perc50 = responseTimes[len(responseTimes)/2]
if perc50 > expectedLBHealthCheckTime { if perc50 > expectedLBHealthCheckTime {
Logf("WARNING: Average startup time is too high: %+v", responseTimes) framework.Logf("WARNING: Average startup time is too high: %+v", responseTimes)
} }
}) })
}) })
func curlServiceNodePort(client *client.Client, ns, name string, port int) error { func curlServiceNodePort(client *client.Client, ns, name string, port int) error {
// TODO: Curl all nodes? // TODO: Curl all nodes?
u, err := getNodePortURL(client, ns, name, port) u, err := framework.GetNodePortURL(client, ns, name, port)
if err != nil { if err != nil {
return err return err
} }
@ -656,7 +657,7 @@ func curlServiceNodePort(client *client.Client, ns, name string, port int) error
pollErr := wait.Poll(10*time.Second, timeout, func() (bool, error) { pollErr := wait.Poll(10*time.Second, timeout, func() (bool, error) {
svcCurlBody, err = simpleGET(timeoutClient, u, "") svcCurlBody, err = simpleGET(timeoutClient, u, "")
if err != nil { if err != nil {
Logf("Failed to curl service node port, body: %v\nerror %v", svcCurlBody, err) framework.Logf("Failed to curl service node port, body: %v\nerror %v", svcCurlBody, err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -664,6 +665,6 @@ func curlServiceNodePort(client *client.Client, ns, name string, port int) error
if pollErr != nil { if pollErr != nil {
return fmt.Errorf("Failed to curl service node port in %v, body: %v\nerror %v", timeout, svcCurlBody, err) return fmt.Errorf("Failed to curl service node port in %v, body: %v\nerror %v", timeout, svcCurlBody, err)
} }
Logf("Successfully curled service node port, body: %v", svcCurlBody) framework.Logf("Successfully curled service node port, body: %v", svcCurlBody)
return nil return nil
} }
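simpleGET here is essentially an HTTP GET with an overridden Host header (so one load-balancer IP can be probed for each name-based route), retried under wait.Poll. A hedged standalone sketch of that request shape; getWithHost is an invented name:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

// getWithHost issues a GET against url while overriding the Host header,
// the way the ingress test probes routes like https://<ip>/foo0 with
// host foo0.bar.com.
func getWithHost(client *http.Client, url, host string) (string, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", err
	}
	if host != "" {
		req.Host = host // sets the Host header for the request
	}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if resp.StatusCode != http.StatusOK {
		return string(body), fmt.Errorf("unexpected status: %v", resp.Status)
	}
	return string(body), nil
}

func main() {
	client := &http.Client{Timeout: 10 * time.Second}
	body, err := getWithHost(client, "http://example.com/", "example.com")
	fmt.Println(len(body), err)
}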


@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
utilnet "k8s.io/kubernetes/pkg/util/net" utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -125,7 +126,7 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri
var k, c bytes.Buffer var k, c bytes.Buffer
tls := ing.Spec.TLS[0] tls := ing.Spec.TLS[0]
host = strings.Join(tls.Hosts, ",") host = strings.Join(tls.Hosts, ",")
Logf("Generating RSA cert for host %v", host) framework.Logf("Generating RSA cert for host %v", host)
if err = generateRSACerts(host, true, &k, &c); err != nil { if err = generateRSACerts(host, true, &k, &c); err != nil {
return return
@ -141,7 +142,7 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri
api.TLSPrivateKeyKey: key, api.TLSPrivateKeyKey: key,
}, },
} }
Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
_, err = kubeClient.Secrets(ing.Namespace).Create(secret) _, err = kubeClient.Secrets(ing.Namespace).Create(secret)
return host, cert, key, err return host, cert, key, err
} }
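createSecret generates an RSA key and certificate for the ingress hosts and stores them under the TLS secret keys. A hedged sketch of the certificate-generation half using only the standard library; the real helper is generateRSACerts in this file and may differ in details:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

// selfSignedCert returns PEM-encoded certificate and key for the given host,
// roughly what a test needs to populate a TLS secret.
func selfSignedCert(host string) (certPEM, keyPEM []byte, err error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: host},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              []string{host},
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, nil, err
	}
	certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM = pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	return certPEM, keyPEM, nil
}

func main() {
	cert, key, err := selfSignedCert("foo.bar.com")
	fmt.Println(len(cert), len(key), err)
}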


@ -23,14 +23,15 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/test/e2e/framework"
) )
// [Feature:InitialResources]: Initial resources is an experimental feature, so // [Feature:InitialResources]: Initial resources is an experimental feature, so
// these tests are not run by default. // these tests are not run by default.
// //
// Flaky issue #20272 // Flaky issue #20272
var _ = KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", func() { var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", func() {
f := NewDefaultFramework("initial-resources") f := framework.NewDefaultFramework("initial-resources")
It("should set initial resources based on historical data", func() { It("should set initial resources based on historical data", func() {
// TODO(piosz): Add cleanup data in InfluxDB that left from previous tests. // TODO(piosz): Add cleanup data in InfluxDB that left from previous tests.
@ -50,7 +51,7 @@ var _ = KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", fun
}) })
}) })
func runPod(f *Framework, name, image string) *api.Pod { func runPod(f *framework.Framework, name, image string) *api.Pod {
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
@ -65,7 +66,7 @@ func runPod(f *Framework, name, image string) *api.Pod {
}, },
} }
createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod) createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod)
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForPodRunningInNamespace(f.Client, name, f.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, name, f.Namespace.Name))
return createdPod return createdPod
} }


@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -39,8 +40,8 @@ const (
jobSelectorKey = "job" jobSelectorKey = "job"
) )
var _ = KubeDescribe("Job", func() { var _ = framework.KubeDescribe("Job", func() {
f := NewDefaultFramework("job") f := framework.NewDefaultFramework("job")
parallelism := 2 parallelism := 2
completions := 4 completions := 4
lotsOfFailures := 5 // more than completions lotsOfFailures := 5 // more than completions
@ -101,7 +102,7 @@ var _ = KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job shows many failures") By("Ensuring job shows many failures")
err = wait.Poll(poll, jobTimeout, func() (bool, error) { err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name) curr, err := f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
if err != nil { if err != nil {
return false, err return false, err
@ -271,7 +272,7 @@ func deleteJob(c *client.Client, ns, name string) error {
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error { func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
return wait.Poll(poll, jobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options) pods, err := c.Pods(ns).List(options)
if err != nil { if err != nil {
@ -289,7 +290,7 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int
// Wait for job to reach completions. // Wait for job to reach completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error { func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error {
return wait.Poll(poll, jobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := c.Extensions().Jobs(ns).Get(jobName) curr, err := c.Extensions().Jobs(ns).Get(jobName)
if err != nil { if err != nil {
return false, err return false, err
@ -300,7 +301,7 @@ func waitForJobFinish(c *client.Client, ns, jobName string, completions int) err
// Wait for job fail. // Wait for job fail.
func waitForJobFail(c *client.Client, ns, jobName string) error { func waitForJobFail(c *client.Client, ns, jobName string) error {
return wait.Poll(poll, jobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := c.Extensions().Jobs(ns).Get(jobName) curr, err := c.Extensions().Jobs(ns).Get(jobName)
if err != nil { if err != nil {
return false, err return false, err


@ -21,19 +21,20 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Kibana Logging Instances Is Alive", func() { var _ = framework.KubeDescribe("Kibana Logging Instances Is Alive", func() {
f := NewDefaultFramework("kibana-logging") f := framework.NewDefaultFramework("kibana-logging")
BeforeEach(func() { BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch // TODO: For now assume we are only testing cluster logging with Elasticsearch
// and Kibana on GCE. Once we are sure that Elasticsearch and Kibana cluster level logging // and Kibana on GCE. Once we are sure that Elasticsearch and Kibana cluster level logging
// works for other providers we should widen the scope of this test. // works for other providers we should widen the scope of this test.
SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
}) })
It("should check that the Kibana logging instance is alive", func() { It("should check that the Kibana logging instance is alive", func() {
@ -47,7 +48,7 @@ const (
) )
// ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive. // ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive.
func ClusterLevelLoggingWithKibana(f *Framework) { func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// graceTime is how long to keep retrying requests for status information. // graceTime is how long to keep retrying requests for status information.
const graceTime = 2 * time.Minute const graceTime = 2 * time.Minute
@ -61,7 +62,7 @@ func ClusterLevelLoggingWithKibana(f *Framework) {
if _, err = s.Get("kibana-logging"); err == nil { if _, err = s.Get("kibana-logging"); err == nil {
break break
} }
Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start)) framework.Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -72,16 +73,16 @@ func ClusterLevelLoggingWithKibana(f *Framework) {
pods, err := f.Client.Pods(api.NamespaceSystem).List(options) pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem) err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
By("Checking to make sure we get a response from the Kibana UI.") By("Checking to make sure we get a response from the Kibana UI.")
err = nil err = nil
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue continue
} }
// Query against the root URL for Kibana. // Query against the root URL for Kibana.
@ -89,7 +90,7 @@ func ClusterLevelLoggingWithKibana(f *Framework) {
Name("kibana-logging"). Name("kibana-logging").
DoRaw() DoRaw()
if err != nil { if err != nil {
Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err) framework.Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
continue continue
} }
break break

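Taken together, the Kibana hunks show the post-refactor shape of an e2e spec: the Describe wrapper, the per-test framework object, the provider skip, and logging all come from the framework package. A condensed sketch follows; the test body (listing pods) is illustrative and not part of this commit.

```go
package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Example suite", func() {
	f := framework.NewDefaultFramework("example")

	BeforeEach(func() {
		// Helpers that used to live unexported in test/e2e are now
		// exported from test/e2e/framework.
		framework.SkipUnlessProviderIs("gce")
	})

	It("lists pods in the test namespace", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("found %d pods in %s", len(pods.Items), f.Namespace.Name)
	})
})
```

The only structural change for test authors is the import and the `framework.` qualifier; the Ginkgo skeleton itself is untouched.
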
File diff suppressed because it is too large

View File

@ -25,15 +25,16 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
const ( const (
// Interval to poll /runningpods on a node // Interval to poll /runningpods on a node
pollInterval = 1 * time.Second pollInterval = 1 * time.Second
// Interval to poll /stats/container on a node // Interval to poll /stats/container on a node
containerStatsPollingInterval = 5 * time.Second containerStatsPollingInterval = 5 * time.Second
) )
@ -41,10 +42,10 @@ const (
// podNamePrefix and namespace. // podNamePrefix and namespace.
func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String { func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String {
matches := sets.NewString() matches := sets.NewString()
Logf("Checking pods on node %v via /runningpods endpoint", nodeName) framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
runningPods, err := GetKubeletPods(c, nodeName) runningPods, err := framework.GetKubeletPods(c, nodeName)
if err != nil { if err != nil {
Logf("Error checking running pods on %v: %v", nodeName, err) framework.Logf("Error checking running pods on %v: %v", nodeName, err)
return matches return matches
} }
for _, pod := range runningPods.Items { for _, pod := range runningPods.Items {
@ -81,25 +82,25 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNam
if seen.Len() == targetNumPods { if seen.Len() == targetNumPods {
return true, nil return true, nil
} }
Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len()) framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
return false, nil return false, nil
}) })
} }
var _ = KubeDescribe("kubelet", func() { var _ = framework.KubeDescribe("kubelet", func() {
var numNodes int var numNodes int
var nodeNames sets.String var nodeNames sets.String
framework := NewDefaultFramework("kubelet") f := framework.NewDefaultFramework("kubelet")
var resourceMonitor *resourceMonitor var resourceMonitor *framework.ResourceMonitor
BeforeEach(func() { BeforeEach(func() {
nodes := ListSchedulableNodesOrDie(framework.Client) nodes := framework.ListSchedulableNodesOrDie(f.Client)
numNodes = len(nodes.Items) numNodes = len(nodes.Items)
nodeNames = sets.NewString() nodeNames = sets.NewString()
for _, node := range nodes.Items { for _, node := range nodes.Items {
nodeNames.Insert(node.Name) nodeNames.Insert(node.Name)
} }
resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingInterval) resourceMonitor = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingInterval)
resourceMonitor.Start() resourceMonitor.Start()
}) })
@ -107,7 +108,7 @@ var _ = KubeDescribe("kubelet", func() {
resourceMonitor.Stop() resourceMonitor.Stop()
}) })
KubeDescribe("Clean up pods on node", func() { framework.KubeDescribe("Clean up pods on node", func() {
type DeleteTest struct { type DeleteTest struct {
podsPerNode int podsPerNode int
timeout time.Duration timeout time.Duration
@ -123,23 +124,23 @@ var _ = KubeDescribe("kubelet", func() {
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID())) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID()))
Expect(RunRC(RCConfig{ Expect(framework.RunRC(framework.RCConfig{
Client: framework.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: framework.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Replicas: totalPods, Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
// Perform a sanity check so that we know all desired pods are // Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to // running on the nodes according to kubelet. The timeout is set to
// only 30 seconds here because RunRC already waited for all pods to // only 30 seconds here because framework.RunRC already waited for all pods to
// transition to the running status. // transition to the running status.
Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, totalPods, Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods,
time.Second*30)).NotTo(HaveOccurred()) time.Second*30)).NotTo(HaveOccurred())
resourceMonitor.LogLatest() resourceMonitor.LogLatest()
By("Deleting the RC") By("Deleting the RC")
DeleteRC(framework.Client, framework.Namespace.Name, rcName) framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the // Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its // node. The /runningpods handler checks the container runtime (or its
// cache) and returns a list of running pods. Some possible causes of // cache) and returns a list of running pods. Some possible causes of
@ -148,9 +149,9 @@ var _ = KubeDescribe("kubelet", func() {
// - a bug in graceful termination (if it is enabled) // - a bug in graceful termination (if it is enabled)
// - docker slow to delete pods (or resource problems causing slowness) // - docker slow to delete pods (or resource problems causing slowness)
start := time.Now() start := time.Now()
Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, 0, Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0,
itArg.timeout)).NotTo(HaveOccurred()) itArg.timeout)).NotTo(HaveOccurred())
Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start)) time.Since(start))
resourceMonitor.LogCPUSummary() resourceMonitor.LogCPUSummary()
}) })

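The comments in this test explain why it re-queries /runningpods both after framework.RunRC and after framework.DeleteRC. Below is a simplified sketch of that check built only from the exported helpers visible in the hunk; unlike getPodMatches, which also takes the namespace into account, it matches pods by name prefix alone.

```go
package e2e

import (
	"strings"
	"time"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForRunningPrefix polls every node's /runningpods endpoint until the
// expected number of pods whose names start with prefix are reported.
func waitForRunningPrefix(c *client.Client, nodeNames sets.String, prefix string, want int, timeout time.Duration) error {
	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
		seen := sets.NewString()
		for _, node := range nodeNames.List() {
			runningPods, err := framework.GetKubeletPods(c, node)
			if err != nil {
				framework.Logf("Error checking running pods on %v: %v", node, err)
				continue
			}
			for _, pod := range runningPods.Items {
				if strings.HasPrefix(pod.Name, prefix) {
					seen.Insert(pod.Name)
				}
			}
		}
		framework.Logf("%d pods match prefix %q; want %d", seen.Len(), prefix, want)
		return seen.Len() == want, nil
	})
}
```
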
View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -37,11 +38,11 @@ const (
type KubeletManagedHostConfig struct { type KubeletManagedHostConfig struct {
hostNetworkPod *api.Pod hostNetworkPod *api.Pod
pod *api.Pod pod *api.Pod
f *Framework f *framework.Framework
} }
var _ = KubeDescribe("KubeletManagedEtcHosts", func() { var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
f := NewDefaultFramework("e2e-kubelet-etc-hosts") f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts")
config := &KubeletManagedHostConfig{ config := &KubeletManagedHostConfig{
f: f, f: f,
} }
@ -94,12 +95,12 @@ func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
func (config *KubeletManagedHostConfig) createPod(podSpec *api.Pod) *api.Pod { func (config *KubeletManagedHostConfig) createPod(podSpec *api.Pod) *api.Pod {
createdPod, err := config.getPodClient().Create(podSpec) createdPod, err := config.getPodClient().Create(podSpec)
if err != nil { if err != nil {
Failf("Failed to create %s pod: %v", podSpec.Name, err) framework.Failf("Failed to create %s pod: %v", podSpec.Name, err)
} }
expectNoError(config.f.WaitForPodRunning(podSpec.Name)) framework.ExpectNoError(config.f.WaitForPodRunning(podSpec.Name))
createdPod, err = config.getPodClient().Get(podSpec.Name) createdPod, err = config.getPodClient().Get(podSpec.Name)
if err != nil { if err != nil {
Failf("Failed to retrieve %s pod: %v", podSpec.Name, err) framework.Failf("Failed to retrieve %s pod: %v", podSpec.Name, err)
} }
return createdPod return createdPod
} }
@ -111,31 +112,31 @@ func (config *KubeletManagedHostConfig) getPodClient() client.PodInterface {
func assertEtcHostsIsKubeletManaged(etcHostsContent string) { func assertEtcHostsIsKubeletManaged(etcHostsContent string) {
isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent) isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
if !isKubeletManaged { if !isKubeletManaged {
Failf("/etc/hosts file should be kubelet managed, but is not: %q", etcHostsContent) framework.Failf("/etc/hosts file should be kubelet managed, but is not: %q", etcHostsContent)
} }
} }
func assertEtcHostsIsNotKubeletManaged(etcHostsContent string) { func assertEtcHostsIsNotKubeletManaged(etcHostsContent string) {
isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent) isKubeletManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
if isKubeletManaged { if isKubeletManaged {
Failf("/etc/hosts file should not be kubelet managed, but is: %q", etcHostsContent) framework.Failf("/etc/hosts file should not be kubelet managed, but is: %q", etcHostsContent)
} }
} }
func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string { func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
cmd := kubectlCmd("exec", fmt.Sprintf("--namespace=%v", config.f.Namespace.Name), podName, "-c", containerName, "cat", "/etc/hosts") cmd := framework.KubectlCmd("exec", fmt.Sprintf("--namespace=%v", config.f.Namespace.Name), podName, "-c", containerName, "cat", "/etc/hosts")
stdout, stderr, err := startCmdAndStreamOutput(cmd) stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
Failf("Failed to retrieve /etc/hosts, err: %q", err) framework.Failf("Failed to retrieve /etc/hosts, err: %q", err)
} }
defer stdout.Close() defer stdout.Close()
defer stderr.Close() defer stderr.Close()
buf := make([]byte, 1000) buf := make([]byte, 1000)
var n int var n int
Logf("reading from `kubectl exec` command's stdout") framework.Logf("reading from `kubectl exec` command's stdout")
if n, err = stdout.Read(buf); err != nil { if n, err = stdout.Read(buf); err != nil {
Failf("Failed to read from kubectl exec stdout: %v", err) framework.Failf("Failed to read from kubectl exec stdout: %v", err)
} }
return string(buf[:n]) return string(buf[:n])
} }

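getEtcHostsContent is representative of how the kubectl exec helpers are reached after the move. A hedged generalization follows; the helper name, the buffer size, and the arbitrary path argument are illustrative.

```go
package e2e

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// readFileViaExec cats a file inside a running container using the exported
// kubectl helpers and returns whatever fits in a single read.
func readFileViaExec(ns, podName, containerName, path string) string {
	cmd := framework.KubectlCmd("exec",
		fmt.Sprintf("--namespace=%v", ns), podName, "-c", containerName, "cat", path)
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		framework.Failf("Failed to start kubectl exec: %v", err)
	}
	defer stdout.Close()
	defer stderr.Close()

	buf := make([]byte, 4096)
	n, err := stdout.Read(buf)
	if err != nil {
		framework.Failf("Failed to read kubectl exec stdout: %v", err)
	}
	return string(buf[:n])
}
```
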
View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -41,33 +42,33 @@ const (
type resourceTest struct { type resourceTest struct {
podsPerNode int podsPerNode int
cpuLimits containersCPUSummary cpuLimits framework.ContainersCPUSummary
memLimits resourceUsagePerContainer memLimits framework.ResourceUsagePerContainer
} }
func logPodsOnNodes(c *client.Client, nodeNames []string) { func logPodsOnNodes(c *client.Client, nodeNames []string) {
for _, n := range nodeNames { for _, n := range nodeNames {
podList, err := GetKubeletRunningPods(c, n) podList, err := framework.GetKubeletRunningPods(c, n)
if err != nil { if err != nil {
Logf("Unable to retrieve kubelet pods for node %v", n) framework.Logf("Unable to retrieve kubelet pods for node %v", n)
continue continue
} }
Logf("%d pods are running on node %v", len(podList.Items), n) framework.Logf("%d pods are running on node %v", len(podList.Items), n)
} }
} }
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, rm *resourceMonitor, func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
expectedCPU map[string]map[float64]float64, expectedMemory resourceUsagePerContainer) { expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
numNodes := nodeNames.Len() numNodes := nodeNames.Len()
totalPods := podsPerNode * numNodes totalPods := podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID())) rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))
// TODO: Use a more realistic workload // TODO: Use a more realistic workload
Expect(RunRC(RCConfig{ Expect(framework.RunRC(framework.RCConfig{
Client: framework.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: framework.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Replicas: totalPods, Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
@ -78,38 +79,38 @@ func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames se
By("Start monitoring resource usage") By("Start monitoring resource usage")
// Periodically dump the cpu summary until the deadline is met. // Periodically dump the cpu summary until the deadline is met.
// Note that without calling resourceMonitor.Reset(), the stats // Note that without calling rm.Reset(), the stats
// would occupy increasingly more memory. This should be fine // would occupy increasingly more memory. This should be fine
// for the current test duration, but we should reclaim the // for the current test duration, but we should reclaim the
// entries if we plan to monitor longer (e.g., 8 hours). // entries if we plan to monitor longer (e.g., 8 hours).
deadline := time.Now().Add(monitoringTime) deadline := time.Now().Add(monitoringTime)
for time.Now().Before(deadline) { for time.Now().Before(deadline) {
timeLeft := deadline.Sub(time.Now()) timeLeft := deadline.Sub(time.Now())
Logf("Still running...%v left", timeLeft) framework.Logf("Still running...%v left", timeLeft)
if timeLeft < reportingPeriod { if timeLeft < reportingPeriod {
time.Sleep(timeLeft) time.Sleep(timeLeft)
} else { } else {
time.Sleep(reportingPeriod) time.Sleep(reportingPeriod)
} }
logPodsOnNodes(framework.Client, nodeNames.List()) logPodsOnNodes(f.Client, nodeNames.List())
} }
By("Reporting overall resource usage") By("Reporting overall resource usage")
logPodsOnNodes(framework.Client, nodeNames.List()) logPodsOnNodes(f.Client, nodeNames.List())
usageSummary, err := rm.GetLatest() usageSummary, err := rm.GetLatest()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("%s", rm.FormatResourceUsage(usageSummary)) framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
verifyMemoryLimits(framework.Client, expectedMemory, usageSummary) verifyMemoryLimits(f.Client, expectedMemory, usageSummary)
cpuSummary := rm.GetCPUSummary() cpuSummary := rm.GetCPUSummary()
Logf("%s", rm.FormatCPUSummary(cpuSummary)) framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
verifyCPULimits(expectedCPU, cpuSummary) verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC") By("Deleting the RC")
DeleteRC(framework.Client, framework.Namespace.Name, rcName) framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
} }
func verifyMemoryLimits(c *client.Client, expected resourceUsagePerContainer, actual resourceUsagePerNode) { func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
if expected == nil { if expected == nil {
return return
} }
@ -132,20 +133,20 @@ func verifyMemoryLimits(c *client.Client, expected resourceUsagePerContainer, ac
} }
if len(nodeErrs) > 0 { if len(nodeErrs) > 0 {
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", "))) errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
heapStats, err := getKubeletHeapStats(c, nodeName) heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
if err != nil { if err != nil {
Logf("Unable to get heap stats from %q", nodeName) framework.Logf("Unable to get heap stats from %q", nodeName)
} else { } else {
Logf("Heap stats on %q\n:%v", nodeName, heapStats) framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
} }
} }
} }
if len(errList) > 0 { if len(errList) > 0 {
Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
} }
} }
func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) { func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
if expected == nil { if expected == nil {
return return
} }
@ -175,30 +176,30 @@ func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) {
} }
} }
if len(errList) > 0 { if len(errList) > 0 {
Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
} }
} }
// Slow by design (1 hour) // Slow by design (1 hour)
var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() { var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
var nodeNames sets.String var nodeNames sets.String
framework := NewDefaultFramework("kubelet-perf") f := framework.NewDefaultFramework("kubelet-perf")
var rm *resourceMonitor var rm *framework.ResourceMonitor
BeforeEach(func() { BeforeEach(func() {
nodes := ListSchedulableNodesOrDie(framework.Client) nodes := framework.ListSchedulableNodesOrDie(f.Client)
nodeNames = sets.NewString() nodeNames = sets.NewString()
for _, node := range nodes.Items { for _, node := range nodes.Items {
nodeNames.Insert(node.Name) nodeNames.Insert(node.Name)
} }
rm = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod) rm = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingPeriod)
rm.Start() rm.Start()
}) })
AfterEach(func() { AfterEach(func() {
rm.Stop() rm.Stop()
}) })
KubeDescribe("regular resource usage tracking", func() { framework.KubeDescribe("regular resource usage tracking", func() {
// We assume that the scheduler will make reasonable scheduling choices // We assume that the scheduler will make reasonable scheduling choices
// and assign ~N pods on the node. // and assign ~N pods on the node.
// Although we want to track N pods per node, there are N + add-on pods // Although we want to track N pods per node, there are N + add-on pods
@ -210,27 +211,27 @@ var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() {
rTests := []resourceTest{ rTests := []resourceTest{
{ {
podsPerNode: 0, podsPerNode: 0,
cpuLimits: containersCPUSummary{ cpuLimits: framework.ContainersCPUSummary{
stats.SystemContainerKubelet: {0.50: 0.06, 0.95: 0.08}, stats.SystemContainerKubelet: {0.50: 0.06, 0.95: 0.08},
stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.06}, stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.06},
}, },
// We set the memory limits generously because the distribution // We set the memory limits generously because the distribution
// of the addon pods affect the memory usage on each node. // of the addon pods affect the memory usage on each node.
memLimits: resourceUsagePerContainer{ memLimits: framework.ResourceUsagePerContainer{
stats.SystemContainerKubelet: &containerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024}, stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024},
stats.SystemContainerRuntime: &containerResourceUsage{MemoryRSSInBytes: 85 * 1024 * 1024}, stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 85 * 1024 * 1024},
}, },
}, },
{ {
podsPerNode: 35, podsPerNode: 35,
cpuLimits: containersCPUSummary{ cpuLimits: framework.ContainersCPUSummary{
stats.SystemContainerKubelet: {0.50: 0.12, 0.95: 0.14}, stats.SystemContainerKubelet: {0.50: 0.12, 0.95: 0.14},
stats.SystemContainerRuntime: {0.50: 0.06, 0.95: 0.08}, stats.SystemContainerRuntime: {0.50: 0.06, 0.95: 0.08},
}, },
// We set the memory limits generously because the distribution // We set the memory limits generously because the distribution
// of the addon pods affect the memory usage on each node. // of the addon pods affect the memory usage on each node.
memLimits: resourceUsagePerContainer{ memLimits: framework.ResourceUsagePerContainer{
stats.SystemContainerRuntime: &containerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024}, stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
}, },
}, },
{ {
@ -244,18 +245,18 @@ var _ = KubeDescribe("Kubelet [Serial] [Slow]", func() {
name := fmt.Sprintf( name := fmt.Sprintf(
"for %d pods per node over %v", podsPerNode, monitoringTime) "for %d pods per node over %v", podsPerNode, monitoringTime)
It(name, func() { It(name, func() {
runResourceTrackingTest(framework, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
}) })
} }
}) })
KubeDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() { framework.KubeDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
density := []int{100} density := []int{100}
for i := range density { for i := range density {
podsPerNode := density[i] podsPerNode := density[i]
name := fmt.Sprintf( name := fmt.Sprintf(
"for %d pods per node over %v", podsPerNode, monitoringTime) "for %d pods per node over %v", podsPerNode, monitoringTime)
It(name, func() { It(name, func() {
runResourceTrackingTest(framework, podsPerNode, nodeNames, rm, nil, nil) runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil)
}) })
} }
}) })

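The monitoring loop above drives the whole tracking test. Here is a compact sketch of the ResourceMonitor lifecycle using only calls that appear in these hunks; the single Sleep stands in for the periodic reporting loop of the real test.

```go
package e2e

import (
	"time"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// trackKubeletUsage samples kubelet and runtime usage for the given duration
// and logs CPU and memory summaries, mirroring the lifecycle used in the test.
func trackKubeletUsage(c *client.Client, duration, samplingInterval time.Duration) error {
	rm := framework.NewResourceMonitor(c, framework.TargetContainers(), samplingInterval)
	rm.Start()
	defer rm.Stop()

	time.Sleep(duration) // the real test interleaves periodic Logf calls here

	usage, err := rm.GetLatest()
	if err != nil {
		return err
	}
	framework.Logf("%s", rm.FormatResourceUsage(usage))
	framework.Logf("%s", rm.FormatCPUSummary(rm.GetCPUSummary()))
	return nil
}
```
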
View File

@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
utilnet "k8s.io/kubernetes/pkg/util/net" utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -63,15 +64,15 @@ type KubeProxyTestConfig struct {
testContainerPod *api.Pod testContainerPod *api.Pod
hostTestContainerPod *api.Pod hostTestContainerPod *api.Pod
endpointPods []*api.Pod endpointPods []*api.Pod
f *Framework f *framework.Framework
nodePortService *api.Service nodePortService *api.Service
loadBalancerService *api.Service loadBalancerService *api.Service
externalAddrs []string externalAddrs []string
nodes []api.Node nodes []api.Node
} }
var _ = KubeDescribe("KubeProxy", func() { var _ = framework.KubeDescribe("KubeProxy", func() {
f := NewDefaultFramework("e2e-kubeproxy") f := framework.NewDefaultFramework("e2e-kubeproxy")
config := &KubeProxyTestConfig{ config := &KubeProxyTestConfig{
f: f, f: f,
} }
@ -238,7 +239,7 @@ func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targ
tries) tries)
By(fmt.Sprintf("Dialing from container. Running command:%s", cmd)) By(fmt.Sprintf("Dialing from container. Running command:%s", cmd))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)
var output map[string][]string var output map[string][]string
err := json.Unmarshal([]byte(stdout), &output) err := json.Unmarshal([]byte(stdout), &output)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
@ -258,14 +259,14 @@ func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targe
// hitting any other. // hitting any other.
forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; sleep %v; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd, hitEndpointRetryDelay) forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; sleep %v; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd, hitEndpointRetryDelay)
By(fmt.Sprintf("Dialing from node. command:%s", forLoop)) By(fmt.Sprintf("Dialing from node. command:%s", forLoop))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop) stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)
Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount)) Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount))
} }
func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) { func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) {
cmd := fmt.Sprintf("curl -s --connect-timeout 1 http://localhost:10249%s", path) cmd := fmt.Sprintf("curl -s --connect-timeout 1 http://localhost:10249%s", path)
By(fmt.Sprintf("Getting kube-proxy self URL %s", path)) By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd) stdout := framework.RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)
Expect(strings.Contains(stdout, expected)).To(BeTrue()) Expect(strings.Contains(stdout, expected)).To(BeTrue())
} }
@ -421,23 +422,23 @@ func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() {
func (config *KubeProxyTestConfig) createTestPods() { func (config *KubeProxyTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec() testContainerPod := config.createTestPodSpec()
hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName) hostTestContainerPod := framework.NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName)
config.createPod(testContainerPod) config.createPod(testContainerPod)
config.createPod(hostTestContainerPod) config.createPod(hostTestContainerPod)
expectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
var err error var err error
config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name) config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
if err != nil { if err != nil {
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
} }
config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name) config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
if err != nil { if err != nil {
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
} }
} }
@ -445,7 +446,7 @@ func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api.
_, err := config.getServiceClient().Create(serviceSpec) _, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = waitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second) err = framework.WaitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name) createdService, err := config.getServiceClient().Get(serviceSpec.Name)
@ -462,11 +463,11 @@ func (config *KubeProxyTestConfig) setup() {
} }
By("Getting node addresses") By("Getting node addresses")
nodeList := ListSchedulableNodesOrDie(config.f.Client) nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP) config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.externalAddrs) < 2 { if len(config.externalAddrs) < 2 {
// fall back to legacy IPs // fall back to legacy IPs
config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP) config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
} }
Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP")) Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
config.nodes = nodeList.Items config.nodes = nodeList.Items
@ -500,7 +501,7 @@ func (config *KubeProxyTestConfig) cleanup() {
} }
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
nodes := ListSchedulableNodesOrDie(config.f.Client) nodes := framework.ListSchedulableNodesOrDie(config.f.Client)
// create pods, one for each node // create pods, one for each node
createdPods := make([]*api.Pod, 0, len(nodes.Items)) createdPods := make([]*api.Pod, 0, len(nodes.Items))
@ -515,9 +516,9 @@ func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector m
// wait that all of them are up // wait that all of them are up
runningPods := make([]*api.Pod, 0, len(nodes.Items)) runningPods := make([]*api.Pod, 0, len(nodes.Items))
for _, p := range createdPods { for _, p := range createdPods {
expectNoError(config.f.WaitForPodReady(p.Name)) framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name) rp, err := config.getPodClient().Get(p.Name)
expectNoError(err) framework.ExpectNoError(err)
runningPods = append(runningPods, rp) runningPods = append(runningPods, rp)
} }
@ -529,14 +530,14 @@ func (config *KubeProxyTestConfig) deleteNetProxyPod() {
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
config.endpointPods = config.endpointPods[1:] config.endpointPods = config.endpointPods[1:]
// wait for pod being deleted. // wait for pod being deleted.
err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) err := framework.WaitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil { if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err) framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
} }
// wait for endpoint being removed. // wait for endpoint being removed.
err = waitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, wait.ForeverTestTimeout) err = framework.WaitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, wait.ForeverTestTimeout)
if err != nil { if err != nil {
Failf("Failed to remove endpoint from service: %s", nodePortServiceName) framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
} }
// wait for kube-proxy to catch up with the pod being deleted. // wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
@ -545,7 +546,7 @@ func (config *KubeProxyTestConfig) deleteNetProxyPod() {
func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod { func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod {
createdPod, err := config.getPodClient().Create(pod) createdPod, err := config.getPodClient().Create(pod)
if err != nil { if err != nil {
Failf("Failed to create %s pod: %v", pod.Name, err) framework.Failf("Failed to create %s pod: %v", pod.Name, err)
} }
return createdPod return createdPod
} }

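dialFromNode and getSelfURL both funnel a shell command through framework.RunHostCmdOrDie on a host-exec pod created with framework.NewHostExecPodSpec. A small sketch of that pattern follows; the helper name and URL argument are placeholders.

```go
package e2e

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// curlFromHostExecPod runs curl inside an already-created host-exec pod
// (see framework.NewHostExecPodSpec) and reports whether the response
// contains the expected substring.
func curlFromHostExecPod(f *framework.Framework, hostExecPodName, url, expected string) bool {
	cmd := fmt.Sprintf("curl -s --connect-timeout 1 %s", url)
	stdout := framework.RunHostCmdOrDie(f.Namespace.Name, hostExecPodName, cmd)
	return strings.Contains(stdout, expected)
}
```
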
View File

@ -21,13 +21,14 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("LimitRange", func() { var _ = framework.KubeDescribe("LimitRange", func() {
f := NewDefaultFramework("limitrange") f := framework.NewDefaultFramework("limitrange")
It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() { It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
By("Creating a LimitRange") By("Creating a LimitRange")
@ -63,7 +64,7 @@ var _ = KubeDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil { if err != nil {
// Print the pod to help in debugging. // Print the pod to help in debugging.
Logf("Pod %+v does not have the expected requirements", pod) framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
} }
@ -84,7 +85,7 @@ var _ = KubeDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil { if err != nil {
// Print the pod to help in debugging. // Print the pod to help in debugging.
Logf("Pod %+v does not have the expected requirements", pod) framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
} }
@ -103,12 +104,12 @@ var _ = KubeDescribe("LimitRange", func() {
}) })
func equalResourceRequirement(expected api.ResourceRequirements, actual api.ResourceRequirements) error { func equalResourceRequirement(expected api.ResourceRequirements, actual api.ResourceRequirements) error {
Logf("Verifying requests: expected %s with actual %s", expected.Requests, actual.Requests) framework.Logf("Verifying requests: expected %s with actual %s", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests) err := equalResourceList(expected.Requests, actual.Requests)
if err != nil { if err != nil {
return err return err
} }
Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits) framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
err = equalResourceList(expected.Limits, actual.Limits) err = equalResourceList(expected.Limits, actual.Limits)
if err != nil { if err != nil {
return err return err

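equalResourceRequirement delegates to equalResourceList, whose body falls outside the shown hunks. The following is a plausible sketch of such a comparison built on resource quantities and their Cmp method; it is an assumption about the helper, not the committed code.

```go
package e2e

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// equalResourceListSketch reports an error on the first resource whose
// quantity differs between the expected and actual lists.
func equalResourceListSketch(expected, actual api.ResourceList) error {
	for name, want := range expected {
		got, found := actual[name]
		if !found {
			return fmt.Errorf("resource %q missing from actual list", name)
		}
		if want.Cmp(got) != 0 {
			return fmt.Errorf("resource %q: expected %s, got %s", name, want.String(), got.String())
		}
	}
	return nil
}
```
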
View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -47,45 +48,45 @@ const (
// the ginkgo.skip list (see driver.go). // the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the // To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag. // -t/--test flag or ginkgo.focus flag.
var _ = KubeDescribe("Load capacity", func() { var _ = framework.KubeDescribe("Load capacity", func() {
var c *client.Client var c *client.Client
var nodeCount int var nodeCount int
var ns string var ns string
var configs []*RCConfig var configs []*framework.RCConfig
// Gathers metrics before teardown // Gathers metrics before teardown
// TODO add flag that allows to skip cleanup on failure // TODO add flag that allows to skip cleanup on failure
AfterEach(func() { AfterEach(func() {
// Verify latency metrics // Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c) highLatencyRequests, err := framework.HighLatencyRequests(c)
expectNoError(err, "Too many instances metrics above the threshold") framework.ExpectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
}) })
// Explicitly put here, to delete namespace at the end of the test // Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.). // (after measuring latency metrics, etc.).
options := FrameworkOptions{ options := framework.FrameworkOptions{
clientQPS: 50, ClientQPS: 50,
clientBurst: 100, ClientBurst: 100,
} }
framework := NewFramework("load", options) f := framework.NewFramework("load", options)
framework.NamespaceDeletionTimeout = time.Hour f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
nodes := ListSchedulableNodesOrDie(c) nodes := framework.ListSchedulableNodesOrDie(c)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which // Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all // generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test. // terminating namespace to be finally deleted before starting this test.
err := checkTestingNSDeletedExcept(c, ns) err := framework.CheckTestingNSDeletedExcept(c, ns)
expectNoError(err) framework.ExpectNoError(err)
expectNoError(resetMetrics(c)) framework.ExpectNoError(framework.ResetMetrics(c))
}) })
type Load struct { type Load struct {
@ -166,8 +167,8 @@ func computeRCCounts(total int) (int, int, int) {
return smallRCCount, mediumRCCount, bigRCCount return smallRCCount, mediumRCCount, bigRCCount
} }
func generateRCConfigs(totalPods int, image string, command []string, c *client.Client, ns string) []*RCConfig { func generateRCConfigs(totalPods int, image string, command []string, c *client.Client, ns string) []*framework.RCConfig {
configs := make([]*RCConfig, 0) configs := make([]*framework.RCConfig, 0)
smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods) smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods)
configs = append(configs, generateRCConfigsForGroup(c, ns, smallRCGroupName, smallRCSize, smallRCCount, image, command)...) configs = append(configs, generateRCConfigsForGroup(c, ns, smallRCGroupName, smallRCSize, smallRCCount, image, command)...)
@ -177,10 +178,10 @@ func generateRCConfigs(totalPods int, image string, command []string, c *client.
return configs return configs
} }
func generateRCConfigsForGroup(c *client.Client, ns, groupName string, size, count int, image string, command []string) []*RCConfig { func generateRCConfigsForGroup(c *client.Client, ns, groupName string, size, count int, image string, command []string) []*framework.RCConfig {
configs := make([]*RCConfig, 0, count) configs := make([]*framework.RCConfig, 0, count)
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
config := &RCConfig{ config := &framework.RCConfig{
Client: c, Client: c,
Name: groupName + "-" + strconv.Itoa(i), Name: groupName + "-" + strconv.Itoa(i),
Namespace: ns, Namespace: ns,
@ -200,7 +201,7 @@ func sleepUpTo(d time.Duration) {
time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds()))) time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds())))
} }
func createAllRC(configs []*RCConfig, creatingTime time.Duration) { func createAllRC(configs []*framework.RCConfig, creatingTime time.Duration) {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(configs)) wg.Add(len(configs))
for _, config := range configs { for _, config := range configs {
@ -209,15 +210,15 @@ func createAllRC(configs []*RCConfig, creatingTime time.Duration) {
wg.Wait() wg.Wait()
} }
func createRC(wg *sync.WaitGroup, config *RCConfig, creatingTime time.Duration) { func createRC(wg *sync.WaitGroup, config *framework.RCConfig, creatingTime time.Duration) {
defer GinkgoRecover() defer GinkgoRecover()
defer wg.Done() defer wg.Done()
sleepUpTo(creatingTime) sleepUpTo(creatingTime)
expectNoError(RunRC(*config), fmt.Sprintf("creating rc %s", config.Name)) framework.ExpectNoError(framework.RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
} }
func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) { func scaleAllRC(configs []*framework.RCConfig, scalingTime time.Duration) {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(configs)) wg.Add(len(configs))
for _, config := range configs { for _, config := range configs {
@ -228,13 +229,13 @@ func scaleAllRC(configs []*RCConfig, scalingTime time.Duration) {
// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards. // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling happens always based on original size, not the current size. // Scaling happens always based on original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) { func scaleRC(wg *sync.WaitGroup, config *framework.RCConfig, scalingTime time.Duration) {
defer GinkgoRecover() defer GinkgoRecover()
defer wg.Done() defer wg.Done()
sleepUpTo(scalingTime) sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2) newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
expectNoError(ScaleRC(config.Client, config.Namespace, config.Name, newSize, true), framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
fmt.Sprintf("scaling rc %s for the first time", config.Name)) fmt.Sprintf("scaling rc %s for the first time", config.Name))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
options := api.ListOptions{ options := api.ListOptions{
@ -242,10 +243,10 @@ func scaleRC(wg *sync.WaitGroup, config *RCConfig, scalingTime time.Duration) {
ResourceVersion: "0", ResourceVersion: "0",
} }
_, err := config.Client.Pods(config.Namespace).List(options) _, err := config.Client.Pods(config.Namespace).List(options)
expectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name)) framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
} }
func deleteAllRC(configs []*RCConfig, deletingTime time.Duration) { func deleteAllRC(configs []*framework.RCConfig, deletingTime time.Duration) {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(configs)) wg.Add(len(configs))
for _, config := range configs { for _, config := range configs {
@ -254,10 +255,10 @@ func deleteAllRC(configs []*RCConfig, deletingTime time.Duration) {
wg.Wait() wg.Wait()
} }
func deleteRC(wg *sync.WaitGroup, config *RCConfig, deletingTime time.Duration) { func deleteRC(wg *sync.WaitGroup, config *framework.RCConfig, deletingTime time.Duration) {
defer GinkgoRecover() defer GinkgoRecover()
defer wg.Done() defer wg.Done()
sleepUpTo(deletingTime) sleepUpTo(deletingTime)
expectNoError(DeleteRC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) framework.ExpectNoError(framework.DeleteRC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} }

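createAllRC, scaleAllRC, and deleteAllRC share one fan-out pattern: a goroutine per RC, jittered by sleepUpTo, joined on a WaitGroup. A condensed, self-contained sketch of the scaling leg follows; the jitter is inlined here instead of calling sleepUpTo.

```go
package e2e

import (
	"fmt"
	"math/rand"
	"sync"
	"time"

	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

// scaleAllRCSketch scales every RC once, at a random point within scalingTime,
// to a random size in [Replicas/2, 3*Replicas/2).
func scaleAllRCSketch(configs []*framework.RCConfig, scalingTime time.Duration) {
	var wg sync.WaitGroup
	wg.Add(len(configs))
	for _, config := range configs {
		go func(config *framework.RCConfig) {
			defer GinkgoRecover()
			defer wg.Done()
			// Spread the scale calls out over the allotted window.
			time.Sleep(time.Duration(rand.Int63n(scalingTime.Nanoseconds())))
			newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
			framework.ExpectNoError(
				framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
				fmt.Sprintf("scaling rc %s", config.Name))
		}(config)
	}
	wg.Wait()
}
```

Spreading the calls over the window keeps the apiserver load realistic, which is the point of this capacity test.
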
View File

@ -24,30 +24,31 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Mesos", func() { var _ = framework.KubeDescribe("Mesos", func() {
framework := NewDefaultFramework("pods") f := framework.NewDefaultFramework("pods")
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
SkipUnlessProviderIs("mesos/docker") framework.SkipUnlessProviderIs("mesos/docker")
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
}) })
It("applies slave attributes as labels", func() { It("applies slave attributes as labels", func() {
nodeClient := framework.Client.Nodes() nodeClient := f.Client.Nodes()
rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
options := api.ListOptions{LabelSelector: rackA} options := api.ListOptions{LabelSelector: rackA}
nodes, err := nodeClient.List(options) nodes, err := nodeClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for node: %v", err) framework.Failf("Failed to query for node: %v", err)
} }
Expect(len(nodes.Items)).To(Equal(1)) Expect(len(nodes.Items)).To(Equal(1))
@ -61,14 +62,14 @@ var _ = KubeDescribe("Mesos", func() {
}) })
It("starts static pods on every node in the mesos cluster", func() { It("starts static pods on every node in the mesos cluster", func() {
client := framework.Client client := f.Client
expectNoError(allNodesReady(client, wait.ForeverTestTimeout), "all nodes ready") framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
nodelist := ListSchedulableNodesOrDie(framework.Client) nodelist := framework.ListSchedulableNodesOrDie(f.Client)
const ns = "static-pods" const ns = "static-pods"
numpods := len(nodelist.Items) numpods := len(nodelist.Items)
expectNoError(waitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout), framework.ExpectNoError(framework.WaitForPodsRunningReady(ns, numpods, wait.ForeverTestTimeout),
fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods)) fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods))
}) })
@ -98,13 +99,13 @@ var _ = KubeDescribe("Mesos", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForPodRunningInNamespace(c, podName, ns)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName) pod, err := c.Pods(ns).Get(podName)
expectNoError(err) framework.ExpectNoError(err)
nodeClient := framework.Client.Nodes() nodeClient := f.Client.Nodes()
// schedule onto node with rack=2 being assigned to the "public" role // schedule onto node with rack=2 being assigned to the "public" role
rack2 := labels.SelectorFromSet(map[string]string{ rack2 := labels.SelectorFromSet(map[string]string{
@ -112,7 +113,7 @@ var _ = KubeDescribe("Mesos", func() {
}) })
options := api.ListOptions{LabelSelector: rack2} options := api.ListOptions{LabelSelector: rack2}
nodes, err := nodeClient.List(options) nodes, err := nodeClient.List(options)
expectNoError(err) framework.ExpectNoError(err)
Expect(nodes.Items[0].Name).To(Equal(pod.Spec.NodeName)) Expect(nodes.Items[0].Name).To(Equal(pod.Spec.NodeName))
}) })

View File

@ -23,6 +23,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -77,23 +78,23 @@ func checkMetrics(response metrics.Metrics, assumedMetrics map[string][]string)
Expect(invalidLabels).To(BeEmpty()) Expect(invalidLabels).To(BeEmpty())
} }
var _ = KubeDescribe("MetricsGrabber", func() { var _ = framework.KubeDescribe("MetricsGrabber", func() {
framework := NewDefaultFramework("metrics-grabber") f := framework.NewDefaultFramework("metrics-grabber")
var c *client.Client var c *client.Client
var grabber *metrics.MetricsGrabber var grabber *metrics.MetricsGrabber
BeforeEach(func() { BeforeEach(func() {
var err error var err error
c = framework.Client c = f.Client
expectNoError(err) framework.ExpectNoError(err)
grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true) grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true)
expectNoError(err) framework.ExpectNoError(err)
}) })
It("should grab all metrics from API server.", func() { It("should grab all metrics from API server.", func() {
By("Connecting to /metrics endpoint") By("Connecting to /metrics endpoint")
unknownMetrics := sets.NewString() unknownMetrics := sets.NewString()
response, err := grabber.GrabFromApiServer(unknownMetrics) response, err := grabber.GrabFromApiServer(unknownMetrics)
expectNoError(err) framework.ExpectNoError(err)
Expect(unknownMetrics).To(BeEmpty()) Expect(unknownMetrics).To(BeEmpty())
checkMetrics(metrics.Metrics(response), metrics.KnownApiServerMetrics) checkMetrics(metrics.Metrics(response), metrics.KnownApiServerMetrics)
@ -101,10 +102,10 @@ var _ = KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a Kubelet.", func() { It("should grab all metrics from a Kubelet.", func() {
By("Proxying to Node through the API server") By("Proxying to Node through the API server")
nodes := ListSchedulableNodesOrDie(c) nodes := framework.ListSchedulableNodesOrDie(c)
Expect(nodes.Items).NotTo(BeEmpty()) Expect(nodes.Items).NotTo(BeEmpty())
response, err := grabber.GrabFromKubelet(nodes.Items[0].Name) response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
expectNoError(err) framework.ExpectNoError(err)
checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics) checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics)
}) })
@ -112,7 +113,7 @@ var _ = KubeDescribe("MetricsGrabber", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Nodes().List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false
for _, node := range nodes.Items { for _, node := range nodes.Items {
@ -121,12 +122,12 @@ var _ = KubeDescribe("MetricsGrabber", func() {
} }
} }
if !masterRegistered { if !masterRegistered {
Logf("Master is not registered. Skipping testing Scheduler metrics.") framework.Logf("Master is not registered. Skipping testing Scheduler metrics.")
return return
} }
unknownMetrics := sets.NewString() unknownMetrics := sets.NewString()
response, err := grabber.GrabFromScheduler(unknownMetrics) response, err := grabber.GrabFromScheduler(unknownMetrics)
expectNoError(err) framework.ExpectNoError(err)
Expect(unknownMetrics).To(BeEmpty()) Expect(unknownMetrics).To(BeEmpty())
checkMetrics(metrics.Metrics(response), metrics.KnownSchedulerMetrics) checkMetrics(metrics.Metrics(response), metrics.KnownSchedulerMetrics)
@ -136,7 +137,7 @@ var _ = KubeDescribe("MetricsGrabber", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Nodes().List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false
for _, node := range nodes.Items { for _, node := range nodes.Items {
@ -145,12 +146,12 @@ var _ = KubeDescribe("MetricsGrabber", func() {
} }
} }
if !masterRegistered { if !masterRegistered {
Logf("Master is not registered. Skipping testing ControllerManager metrics.") framework.Logf("Master is not registered. Skipping testing ControllerManager metrics.")
return return
} }
unknownMetrics := sets.NewString() unknownMetrics := sets.NewString()
response, err := grabber.GrabFromControllerManager(unknownMetrics) response, err := grabber.GrabFromControllerManager(unknownMetrics)
expectNoError(err) framework.ExpectNoError(err)
Expect(unknownMetrics).To(BeEmpty()) Expect(unknownMetrics).To(BeEmpty())
checkMetrics(metrics.Metrics(response), metrics.KnownControllerManagerMetrics) checkMetrics(metrics.Metrics(response), metrics.KnownControllerManagerMetrics)

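The MetricsGrabber changes are call-site renames only. For orientation, here is a minimal sketch of grabbing API server metrics with the same constructor arguments used in the hunk, discarding the response and failing only on unrecognized metric names; the helper name is illustrative.

```go
package e2e

import (
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/metrics"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
)

// failOnUnknownApiServerMetrics grabs /metrics from the API server and fails
// the test if the grabber reports any metric names it does not recognize.
func failOnUnknownApiServerMetrics(c *client.Client) {
	grabber, err := metrics.NewMetricsGrabber(c, true, true, true, true)
	framework.ExpectNoError(err)

	unknownMetrics := sets.NewString()
	_, err = grabber.GrabFromApiServer(unknownMetrics)
	framework.ExpectNoError(err)

	if unknownMetrics.Len() > 0 {
		framework.Failf("API server reported unknown metrics: %v", unknownMetrics.List())
	}
}
```
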
View File

@ -26,15 +26,16 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
var _ = KubeDescribe("Monitoring", func() { var _ = framework.KubeDescribe("Monitoring", func() {
f := NewDefaultFramework("monitoring") f := framework.NewDefaultFramework("monitoring")
BeforeEach(func() { BeforeEach(func() {
SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
}) })
It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() { It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
@ -190,7 +191,7 @@ func getInfluxdbData(c *client.Client, query string, tag string) (map[string]boo
return nil, fmt.Errorf("expected exactly one series for query %q.", query) return nil, fmt.Errorf("expected exactly one series for query %q.", query)
} }
if len(response.Results[0].Series[0].Columns) != 1 { if len(response.Results[0].Series[0].Columns) != 1 {
Failf("Expected one column for query %q. Found %v", query, response.Results[0].Series[0].Columns) framework.Failf("Expected one column for query %q. Found %v", query, response.Results[0].Series[0].Columns)
} }
result := map[string]bool{} result := map[string]bool{}
for _, value := range response.Results[0].Series[0].Values { for _, value := range response.Results[0].Series[0].Values {
@ -216,20 +217,20 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string
pods, err := getInfluxdbData(c, podlistQuery, "pod_id") pods, err := getInfluxdbData(c, podlistQuery, "pod_id")
if err != nil { if err != nil {
// We don't fail the test here because the influxdb service might still not be running. // We don't fail the test here because the influxdb service might still not be running.
Logf("failed to query list of pods from influxdb. Query: %q, Err: %v", podlistQuery, err) framework.Logf("failed to query list of pods from influxdb. Query: %q, Err: %v", podlistQuery, err)
return false return false
} }
nodes, err := getInfluxdbData(c, nodelistQuery, "hostname") nodes, err := getInfluxdbData(c, nodelistQuery, "hostname")
if err != nil { if err != nil {
Logf("failed to query list of nodes from influxdb. Query: %q, Err: %v", nodelistQuery, err) framework.Logf("failed to query list of nodes from influxdb. Query: %q, Err: %v", nodelistQuery, err)
return false return false
} }
if !expectedItemsExist(expectedPods, pods) { if !expectedItemsExist(expectedPods, pods) {
Logf("failed to find all expected Pods.\nExpected: %v\nActual: %v", expectedPods, pods) framework.Logf("failed to find all expected Pods.\nExpected: %v\nActual: %v", expectedPods, pods)
return false return false
} }
if !expectedItemsExist(expectedNodes, nodes) { if !expectedItemsExist(expectedNodes, nodes) {
Logf("failed to find all expected Nodes.\nExpected: %v\nActual: %v", expectedNodes, nodes) framework.Logf("failed to find all expected Nodes.\nExpected: %v\nActual: %v", expectedNodes, nodes)
return false return false
} }
return true return true
@ -238,12 +239,12 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string
func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
// Check if heapster pods and services are up. // Check if heapster pods and services are up.
expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c) expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c)
expectNoError(err) framework.ExpectNoError(err)
expectNoError(expectedServicesExist(c)) framework.ExpectNoError(expectedServicesExist(c))
// TODO: Wait for all pods and services to be running. // TODO: Wait for all pods and services to be running.
expectedNodes, err := getAllNodesInCluster(c) expectedNodes, err := getAllNodesInCluster(c)
expectNoError(err) framework.ExpectNoError(err)
startTime := time.Now() startTime := time.Now()
for { for {
if validatePodsAndNodes(c, expectedPods, expectedNodes) { if validatePodsAndNodes(c, expectedPods, expectedNodes) {
@ -256,7 +257,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
} }
time.Sleep(sleepBetweenAttempts) time.Sleep(sleepBetweenAttempts)
} }
Failf("monitoring using heapster and influxdb test failed") framework.Failf("monitoring using heapster and influxdb test failed")
} }
func printDebugInfo(c *client.Client) { func printDebugInfo(c *client.Client) {
@ -264,10 +265,10 @@ func printDebugInfo(c *client.Client) {
options := api.ListOptions{LabelSelector: set.AsSelector()} options := api.ListOptions{LabelSelector: set.AsSelector()}
podList, err := c.Pods(api.NamespaceSystem).List(options) podList, err := c.Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
Logf("Error while listing pods %v", err) framework.Logf("Error while listing pods %v", err)
return return
} }
for _, pod := range podList.Items { for _, pod := range podList.Items {
Logf("Kubectl output:\n%v", runKubectlOrDie("log", pod.Name, "--namespace=kube-system")) framework.Logf("Kubectl output:\n%v", framework.RunKubectlOrDie("log", pod.Name, "--namespace=kube-system"))
} }
} }
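validatePodsAndNodes above depends on an expectedItemsExist helper whose body falls outside these hunks; a minimal implementation consistent with how it is called (every expected name must be present as a key in the map produced by getInfluxdbData) would be:

// expectedItemsExist reports whether every expected name appears in the set of
// items returned by the influxdb query. (Sketch only; the real helper is not
// shown in this diff.)
func expectedItemsExist(expectedItems []string, actualItems map[string]bool) bool {
	for _, item := range expectedItems {
		if !actualItems[item] {
			return false
		}
	}
	return true
}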

View File

@ -26,12 +26,13 @@ import (
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) { func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
var err error var err error
By("Creating testing namespaces") By("Creating testing namespaces")
@ -50,13 +51,13 @@ func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds in
//Wait 10 seconds, then SEND delete requests for all the namespaces. //Wait 10 seconds, then SEND delete requests for all the namespaces.
By("Waiting 10 seconds") By("Waiting 10 seconds")
time.Sleep(time.Duration(10 * time.Second)) time.Sleep(time.Duration(10 * time.Second))
deleted, err := deleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */) deleted, err := framework.DeleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(len(deleted)).To(Equal(totalNS)) Expect(len(deleted)).To(Equal(totalNS))
By("Waiting for namespaces to vanish") By("Waiting for namespaces to vanish")
//Now POLL until all namespaces have been eradicated. //Now POLL until all namespaces have been eradicated.
expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
var cnt = 0 var cnt = 0
nsList, err := f.Client.Namespaces().List(api.ListOptions{}) nsList, err := f.Client.Namespaces().List(api.ListOptions{})
@ -69,14 +70,14 @@ func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds in
} }
} }
if cnt > maxAllowedAfterDel { if cnt > maxAllowedAfterDel {
Logf("Remaining namespaces : %v", cnt) framework.Logf("Remaining namespaces : %v", cnt)
return false, nil return false, nil
} }
return true, nil return true, nil
})) }))
} }
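extinguish relies on wait.Poll(interval, timeout, condition): the condition func returns (done bool, err error), and polling stops when done is true, when a non-nil error is returned, or when the timeout elapses. A standalone sketch of the same idiom (a hypothetical helper mirroring the namespace-prefix count in the loop above):

// waitForFewNamespaces polls every 2s until at most max namespaces whose names
// start with prefix remain, or until maxSeconds elapse. Assumes the imports
// already used in this file plus "strings".
func waitForFewNamespaces(c *client.Client, prefix string, max, maxSeconds int) error {
	return wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
		func() (bool, error) {
			nsList, err := c.Namespaces().List(api.ListOptions{})
			if err != nil {
				return false, err
			}
			cnt := 0
			for _, ns := range nsList.Items {
				if strings.HasPrefix(ns.Name, prefix) {
					cnt++
				}
			}
			return cnt <= max, nil
		})
}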
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) { func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error var err error
By("Creating a test namespace") By("Creating a test namespace")
@ -84,7 +85,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace") By("Waiting for a default service account to be provisioned in namespace")
err = waitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a pod in the namespace") By("Creating a pod in the namespace")
@ -105,7 +106,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for the pod to have running status") By("Waiting for the pod to have running status")
expectNoError(waitForPodRunningInNamespace(f.Client, pod.Name, pod.Namespace)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, pod.Namespace))
By("Deleting the namespace") By("Deleting the namespace")
err = f.Client.Namespaces().Delete(namespace.Name) err = f.Client.Namespaces().Delete(namespace.Name)
@ -113,7 +114,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) {
By("Waiting for the namespace to be removed.") By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
expectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.Client.Namespaces().Get(namespace.Name) _, err = f.Client.Namespaces().Get(namespace.Name)
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
@ -127,7 +128,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *Framework) {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
} }
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) { func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error var err error
By("Creating a test namespace") By("Creating a test namespace")
@ -135,7 +136,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace") By("Waiting for a default service account to be provisioned in namespace")
err = waitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a service in the namespace") By("Creating a service in the namespace")
@ -165,7 +166,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) {
By("Waiting for the namespace to be removed.") By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) maxWaitSeconds := int64(60)
expectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.Client.Namespaces().Get(namespace.Name) _, err = f.Client.Namespaces().Get(namespace.Name)
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
@ -207,9 +208,9 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *Framework) {
// that each have a variable amount of content in the associated Namespace. // that each have a variable amount of content in the associated Namespace.
// When run in [Serial] this test appears to delete Namespace objects at a // When run in [Serial] this test appears to delete Namespace objects at a
// rate of approximately 1 per second. // rate of approximately 1 per second.
var _ = KubeDescribe("Namespaces [Serial]", func() { var _ = framework.KubeDescribe("Namespaces [Serial]", func() {
f := NewDefaultFramework("namespaces") f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.", It("should ensure that all pods are removed when a namespace is deleted.",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) }) func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })

View File

@ -24,13 +24,14 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Networking", func() { var _ = framework.KubeDescribe("Networking", func() {
f := NewDefaultFramework("nettest") f := framework.NewDefaultFramework("nettest")
var svcname = "nettest" var svcname = "nettest"
@ -41,16 +42,16 @@ var _ = KubeDescribe("Networking", func() {
By("Executing a successful http request from the external internet") By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com") resp, err := http.Get("http://google.com")
if err != nil { if err != nil {
Failf("Unable to connect/talk to the internet: %v", err) framework.Failf("Unable to connect/talk to the internet: %v", err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
} }
}) })
It("should provide Internet connection for containers [Conformance]", func() { It("should provide Internet connection for containers [Conformance]", func() {
By("Running container which tries to wget google.com") By("Running container which tries to wget google.com")
expectNoError(CheckConnectivityToHost(f, "", "wget-test", "google.com")) framework.ExpectNoError(framework.CheckConnectivityToHost(f, "", "wget-test", "google.com"))
}) })
// First test because it has no dependencies on variables created later on. // First test because it has no dependencies on variables created later on.
@ -69,7 +70,7 @@ var _ = KubeDescribe("Networking", func() {
AbsPath(test.path). AbsPath(test.path).
DoRaw() DoRaw()
if err != nil { if err != nil {
Failf("Failed: %v\nBody: %s", err, string(data)) framework.Failf("Failed: %v\nBody: %s", err, string(data))
} }
} }
}) })
@ -97,30 +98,30 @@ var _ = KubeDescribe("Networking", func() {
}, },
}) })
if err != nil { if err != nil {
Failf("unable to create test service named [%s] %v", svc.Name, err) framework.Failf("unable to create test service named [%s] %v", svc.Name, err)
} }
// Clean up service // Clean up service
defer func() { defer func() {
By("Cleaning up the service") By("Cleaning up the service")
if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil { if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil {
Failf("unable to delete svc %v: %v", svc.Name, err) framework.Failf("unable to delete svc %v: %v", svc.Name, err)
} }
}() }()
By("Creating a webserver (pending) pod on each node") By("Creating a webserver (pending) pod on each node")
nodes, err := GetReadyNodes(f) nodes, err := framework.GetReadyNodes(f)
expectNoError(err) framework.ExpectNoError(err)
if len(nodes.Items) == 1 { if len(nodes.Items) == 1 {
// in general, the test requires two nodes. But for local development, often a one node cluster // in general, the test requires two nodes. But for local development, often a one node cluster
// is created, for simplicity and speed. (see issue #10012). We permit one-node test // is created, for simplicity and speed. (see issue #10012). We permit one-node test
// only in some cases // only in some cases
if !providerIs("local") { if !framework.ProviderIs("local") {
Failf(fmt.Sprintf("The test requires two Ready nodes on %s, but found just one.", testContext.Provider)) framework.Failf(fmt.Sprintf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider))
} }
Logf("Only one ready node is detected. The test has limited scope in such setting. " + framework.Logf("Only one ready node is detected. The test has limited scope in such setting. " +
"Rerun it with at least two nodes to get complete coverage.") "Rerun it with at least two nodes to get complete coverage.")
} }
@ -131,7 +132,7 @@ var _ = KubeDescribe("Networking", func() {
By("Cleaning up the webserver pods") By("Cleaning up the webserver pods")
for _, podName := range podNames { for _, podName := range podNames {
if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil { if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil {
Logf("Failed to delete pod %s: %v", podName, err) framework.Logf("Failed to delete pod %s: %v", podName, err)
} }
} }
}() }()
@ -148,7 +149,7 @@ var _ = KubeDescribe("Networking", func() {
//once response OK, evaluate response body for pass/fail. //once response OK, evaluate response body for pass/fail.
var body []byte var body []byte
getDetails := func() ([]byte, error) { getDetails := func() ([]byte, error) {
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
return nil, errProxy return nil, errProxy
} }
@ -159,7 +160,7 @@ var _ = KubeDescribe("Networking", func() {
} }
getStatus := func() ([]byte, error) { getStatus := func() ([]byte, error) {
proxyRequest, errProxy := getServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
if errProxy != nil { if errProxy != nil {
return nil, errProxy return nil, errProxy
} }
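getDetails and getStatus both obtain a services-proxy request with framework.GetServicesProxyRequest and then address a path on the test service; the chained calls fall outside these hunks, so the following is only an approximate sketch (Namespace/Name/Suffix/DoRaw are the standard request-builder methods of the unversioned client, and the exact suffixes such as "read" and "status" are assumptions):

// getServiceBody sketches the proxy call shape shared by getDetails and
// getStatus. Approximate; the exact service paths are not shown in this diff.
func getServiceBody(f *framework.Framework, svcName, suffix string) ([]byte, error) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
	if errProxy != nil {
		return nil, errProxy
	}
	return proxyRequest.
		Namespace(f.Namespace.Name).
		Name(svcName).
		Suffix(suffix).
		DoRaw()
}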
@ -174,61 +175,61 @@ var _ = KubeDescribe("Networking", func() {
timeout := time.Now().Add(3 * time.Minute) timeout := time.Now().Add(3 * time.Minute)
for i := 0; !passed && timeout.After(time.Now()); i++ { for i := 0; !passed && timeout.After(time.Now()); i++ {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
Logf("About to make a proxy status call") framework.Logf("About to make a proxy status call")
start := time.Now() start := time.Now()
body, err = getStatus() body, err = getStatus()
Logf("Proxy status call returned in %v", time.Since(start)) framework.Logf("Proxy status call returned in %v", time.Since(start))
if err != nil { if err != nil {
Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err) framework.Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err)
continue continue
} }
// Finally, pass/fail the test based on the container's response body, i.e. whether or not it was able to find peers. // Finally, pass/fail the test based on the container's response body, i.e. whether or not it was able to find peers.
switch { switch {
case string(body) == "pass": case string(body) == "pass":
Logf("Passed on attempt %v. Cleaning up.", i) framework.Logf("Passed on attempt %v. Cleaning up.", i)
passed = true passed = true
case string(body) == "running": case string(body) == "running":
Logf("Attempt %v: test still running", i) framework.Logf("Attempt %v: test still running", i)
case string(body) == "fail": case string(body) == "fail":
if body, err = getDetails(); err != nil { if body, err = getDetails(); err != nil {
Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) framework.Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err)
} else { } else {
Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body)) framework.Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body))
} }
case strings.Contains(string(body), "no endpoints available"): case strings.Contains(string(body), "no endpoints available"):
Logf("Attempt %v: waiting on service/endpoints", i) framework.Logf("Attempt %v: waiting on service/endpoints", i)
default: default:
Logf("Unexpected response:\n%s", body) framework.Logf("Unexpected response:\n%s", body)
} }
} }
if !passed { if !passed {
if body, err = getDetails(); err != nil { if body, err = getDetails(); err != nil {
Failf("Timed out. Cleaning up. Error reading details: %v", err) framework.Failf("Timed out. Cleaning up. Error reading details: %v", err)
} else { } else {
Failf("Timed out. Cleaning up. Details:\n%s", string(body)) framework.Failf("Timed out. Cleaning up. Details:\n%s", string(body))
} }
} }
Expect(string(body)).To(Equal("pass")) Expect(string(body)).To(Equal("pass"))
}) })
// Marked with [Flaky] until the tests prove themselves stable. // Marked with [Flaky] until the tests prove themselves stable.
KubeDescribe("[Flaky] Granular Checks", func() { framework.KubeDescribe("[Flaky] Granular Checks", func() {
It("should function for pod communication on a single node", func() { It("should function for pod communication on a single node", func() {
By("Picking a node") By("Picking a node")
nodes, err := GetReadyNodes(f) nodes, err := framework.GetReadyNodes(f)
expectNoError(err) framework.ExpectNoError(err)
node := nodes.Items[0] node := nodes.Items[0]
By("Creating a webserver pod") By("Creating a webserver pod")
podName := "same-node-webserver" podName := "same-node-webserver"
defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil) defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil)
ip := LaunchWebserverPod(f, podName, node.Name) ip := framework.LaunchWebserverPod(f, podName, node.Name)
By("Checking that the webserver is accessible from a pod on the same node") By("Checking that the webserver is accessible from a pod on the same node")
expectNoError(CheckConnectivityToHost(f, node.Name, "same-node-wget", ip)) framework.ExpectNoError(framework.CheckConnectivityToHost(f, node.Name, "same-node-wget", ip))
}) })
It("should function for pod communication between nodes", func() { It("should function for pod communication between nodes", func() {
@ -236,11 +237,11 @@ var _ = KubeDescribe("Networking", func() {
podClient := f.Client.Pods(f.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("Picking multiple nodes") By("Picking multiple nodes")
nodes, err := GetReadyNodes(f) nodes, err := framework.GetReadyNodes(f)
expectNoError(err) framework.ExpectNoError(err)
if len(nodes.Items) == 1 { if len(nodes.Items) == 1 {
Skipf("The test requires two Ready nodes on %s, but found just one.", testContext.Provider) framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
} }
node1 := nodes.Items[0] node1 := nodes.Items[0]
@ -249,15 +250,15 @@ var _ = KubeDescribe("Networking", func() {
By("Creating a webserver pod") By("Creating a webserver pod")
podName := "different-node-webserver" podName := "different-node-webserver"
defer podClient.Delete(podName, nil) defer podClient.Delete(podName, nil)
ip := LaunchWebserverPod(f, podName, node1.Name) ip := framework.LaunchWebserverPod(f, podName, node1.Name)
By("Checking that the webserver is accessible from a pod on a different node") By("Checking that the webserver is accessible from a pod on a different node")
expectNoError(CheckConnectivityToHost(f, node2.Name, "different-node-wget", ip)) framework.ExpectNoError(framework.CheckConnectivityToHost(f, node2.Name, "different-node-wget", ip))
}) })
}) })
}) })
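Both Granular Checks reduce to the same reusable pattern: pin a webserver pod to one node, then verify reachability from a wget probe pod on a chosen node. A compact sketch using only the framework helpers visible in this diff (pod names are illustrative):

// checkPodToPod launches a webserver on serverNode and verifies that a wget
// probe scheduled on clientNode can reach it. Pod names here are illustrative.
func checkPodToPod(f *framework.Framework, serverNode, clientNode string) {
	podName := "conn-check-webserver"
	defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil)
	ip := framework.LaunchWebserverPod(f, podName, serverNode)
	framework.ExpectNoError(framework.CheckConnectivityToHost(f, clientNode, "conn-check-wget", ip))
}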
func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name, version string) []string { func LaunchNetTestPodPerNode(f *framework.Framework, nodes *api.NodeList, name, version string) []string {
podNames := []string{} podNames := []string{}
totalPods := len(nodes.Items) totalPods := len(nodes.Items)
@ -291,7 +292,7 @@ func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name, version st
}, },
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name) framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name)
podNames = append(podNames, pod.ObjectMeta.Name) podNames = append(podNames, pod.ObjectMeta.Name)
} }
return podNames return podNames

View File

@ -27,6 +27,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -64,19 +65,19 @@ const (
// 7. Observe that the pod in pending status schedules on that node. // 7. Observe that the pod in pending status schedules on that node.
// //
// Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way. // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
var c *client.Client var c *client.Client
var unfilledNodeName, recoveredNodeName string var unfilledNodeName, recoveredNodeName string
framework := NewDefaultFramework("node-outofdisk") f := framework.NewDefaultFramework("node-outofdisk")
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
nodelist := ListSchedulableNodesOrDie(c) nodelist := framework.ListSchedulableNodesOrDie(c)
// Skip this test on small clusters. No need to fail since it is not a use // Skip this test on small clusters. No need to fail since it is not a use
// case that any cluster of small size needs to support. // case that any cluster of small size needs to support.
SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
unfilledNodeName = nodelist.Items[0].Name unfilledNodeName = nodelist.Items[0].Name
for _, node := range nodelist.Items[1:] { for _, node := range nodelist.Items[1:] {
@ -86,7 +87,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
AfterEach(func() { AfterEach(func() {
nodelist := ListSchedulableNodesOrDie(c) nodelist := framework.ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).ToNot(BeZero()) Expect(len(nodelist.Items)).ToNot(BeZero())
for _, node := range nodelist.Items { for _, node := range nodelist.Items {
if unfilledNodeName == node.Name || recoveredNodeName == node.Name { if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@ -98,11 +99,11 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
It("runs out of disk space", func() { It("runs out of disk space", func() {
unfilledNode, err := c.Nodes().Get(unfilledNodeName) unfilledNode, err := c.Nodes().Get(unfilledNodeName)
expectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name)) By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name))
milliCpu, err := availCpu(c, unfilledNode) milliCpu, err := availCpu(c, unfilledNode)
expectNoError(err) framework.ExpectNoError(err)
// Per pod CPU should be just enough to fit only (numNodeOODPods - 1) pods on the given // Per pod CPU should be just enough to fit only (numNodeOODPods - 1) pods on the given
// node. We compute this value by dividing the available CPU capacity on the node by // node. We compute this value by dividing the available CPU capacity on the node by
@ -111,7 +112,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
// subtracting 1% from the value, we directly use 0.99 as the multiplier. // subtracting 1% from the value, we directly use 0.99 as the multiplier.
podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99) podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99)
ns := framework.Namespace.Name ns := f.Namespace.Name
podClient := c.Pods(ns) podClient := c.Pods(ns)
By("Creating pods and waiting for all but one pods to be scheduled") By("Creating pods and waiting for all but one pods to be scheduled")
@ -120,9 +121,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
name := fmt.Sprintf("pod-node-outofdisk-%d", i) name := fmt.Sprintf("pod-node-outofdisk-%d", i)
createOutOfDiskPod(c, ns, name, podCPU) createOutOfDiskPod(c, ns, name, podCPU)
expectNoError(framework.WaitForPodRunning(name)) framework.ExpectNoError(f.WaitForPodRunning(name))
pod, err := podClient.Get(name) pod, err := podClient.Get(name)
expectNoError(err) framework.ExpectNoError(err)
Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName)) Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName))
} }
@ -140,7 +141,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options) schedEvents, err := c.Events(ns).List(options)
expectNoError(err) framework.ExpectNoError(err)
if len(schedEvents.Items) > 0 { if len(schedEvents.Items) > 0 {
return true, nil return true, nil
@ -149,7 +150,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
} }
}) })
nodelist := ListSchedulableNodesOrDie(c) nodelist := framework.ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1)) Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
nodeToRecover := nodelist.Items[1] nodeToRecover := nodelist.Items[1]
@ -159,9 +160,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
recoveredNodeName = nodeToRecover.Name recoveredNodeName = nodeToRecover.Name
By(fmt.Sprintf("Verifying that pod %s schedules on node %s", pendingPodName, recoveredNodeName)) By(fmt.Sprintf("Verifying that pod %s schedules on node %s", pendingPodName, recoveredNodeName))
expectNoError(framework.WaitForPodRunning(pendingPodName)) framework.ExpectNoError(f.WaitForPodRunning(pendingPodName))
pendingPod, err := podClient.Get(pendingPodName) pendingPod, err := podClient.Get(pendingPodName)
expectNoError(err) framework.ExpectNoError(err)
Expect(pendingPod.Spec.NodeName).To(Equal(recoveredNodeName)) Expect(pendingPod.Spec.NodeName).To(Equal(recoveredNodeName))
}) })
}) })
@ -191,7 +192,7 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
} }
_, err := podClient.Create(pod) _, err := podClient.Create(pod)
expectNoError(err) framework.ExpectNoError(err)
} }
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by // availCpu calculates the available CPU on a given node by subtracting the CPU requested by
@ -218,7 +219,7 @@ func availCpu(c *client.Client, node *api.Node) (int64, error) {
// is in turn obtained internally from cadvisor. // is in turn obtained internally from cadvisor.
func availSize(c *client.Client, node *api.Node) (uint64, error) { func availSize(c *client.Client, node *api.Node) (uint64, error) {
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
Logf("Querying stats for node %s using url %s", node.Name, statsResource) framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource)
res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
if err != nil { if err != nil {
return 0, fmt.Errorf("error querying cAdvisor API: %v", err) return 0, fmt.Errorf("error querying cAdvisor API: %v", err)
@ -236,21 +237,21 @@ func availSize(c *client.Client, node *api.Node) (uint64, error) {
// below the lowDiskSpaceThreshold mark. // below the lowDiskSpaceThreshold mark.
func fillDiskSpace(c *client.Client, node *api.Node) { func fillDiskSpace(c *client.Client, node *api.Node) {
avail, err := availSize(c, node) avail, err := availSize(c, node)
expectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
fillSize := (avail - lowDiskSpaceThreshold + (100 * mb)) fillSize := (avail - lowDiskSpaceThreshold + (100 * mb))
Logf("Node %s: disk space available %d bytes", node.Name, avail) framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
By(fmt.Sprintf("Node %s: creating a file of size %d bytes to fill the available disk space", node.Name, fillSize)) By(fmt.Sprintf("Node %s: creating a file of size %d bytes to fill the available disk space", node.Name, fillSize))
cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize) cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize)
expectNoError(issueSSHCommand(cmd, testContext.Provider, node)) framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut) ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut)
avail, err = availSize(c, node) avail, err = availSize(c, node)
Logf("Node %s: disk space available %d bytes", node.Name, avail) framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
Expect(avail < lowDiskSpaceThreshold).To(BeTrue()) Expect(avail < lowDiskSpaceThreshold).To(BeTrue())
} }
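Worked through symbolically, fillDiskSpace leaves the node deliberately just under the threshold. After fallocate writes fillSize = avail - lowDiskSpaceThreshold + 100*mb bytes, the remaining free space is avail - fillSize = lowDiskSpaceThreshold - 100*mb, i.e. 100 MB below the low-disk-space mark, which is what should flip the node's OutOfDisk condition to true. recoverDiskSpace below then removes test.img so that free space climbs back above the threshold and the condition clears.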
@ -258,8 +259,8 @@ func fillDiskSpace(c *client.Client, node *api.Node) {
func recoverDiskSpace(c *client.Client, node *api.Node) { func recoverDiskSpace(c *client.Client, node *api.Node) {
By(fmt.Sprintf("Recovering disk space on node %s", node.Name)) By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
cmd := "rm -f test.img" cmd := "rm -f test.img"
expectNoError(issueSSHCommand(cmd, testContext.Provider, node)) framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut) ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut)
} }

View File

@ -38,6 +38,7 @@ import (
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -45,19 +46,19 @@ const (
gcePDDetachPollTime = 10 * time.Second gcePDDetachPollTime = 10 * time.Second
) )
var _ = KubeDescribe("Pod Disks", func() { var _ = framework.KubeDescribe("Pod Disks", func() {
var ( var (
podClient client.PodInterface podClient client.PodInterface
host0Name string host0Name string
host1Name string host1Name string
) )
framework := NewDefaultFramework("pod-disks") f := framework.NewDefaultFramework("pod-disks")
BeforeEach(func() { BeforeEach(func() {
SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
podClient = framework.Client.Pods(framework.Namespace.Name) podClient = f.Client.Pods(f.Namespace.Name)
nodes := ListSchedulableNodesOrDie(framework.Client) nodes := framework.ListSchedulableNodesOrDie(f.Client)
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
@ -68,11 +69,11 @@ var _ = KubeDescribe("Pod Disks", func() {
}) })
It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host [Slow]", func() { It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host [Slow]", func() {
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD") By("creating PD")
diskName, err := createPDWithRetry() diskName, err := createPDWithRetry()
expectNoError(err, "Error creating PD") framework.ExpectNoError(err, "Error creating PD")
host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */) host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
host1Pod := testPDPod([]string{diskName}, host1Name, false /* readOnly */, 1 /* numContainers */) host1Pod := testPDPod([]string{diskName}, host1Name, false /* readOnly */, 1 /* numContainers */)
@ -89,43 +90,43 @@ var _ = KubeDescribe("Pod Disks", func() {
By("submitting host0Pod to kubernetes") By("submitting host0Pod to kubernetes")
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
testFile := "/testpd1/tracker" testFile := "/testpd1/tracker"
testFileContents := fmt.Sprintf("%v", mathrand.Int()) testFileContents := fmt.Sprintf("%v", mathrand.Int())
expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
Logf("Wrote value: %v", testFileContents) framework.Logf("Wrote value: %v", testFileContents)
By("deleting host0Pod") By("deleting host0Pod")
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes") By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod) _, err = podClient.Create(host1Pod)
expectNoError(err, "Failed to create host1Pod") framework.ExpectNoError(err, "Failed to create host1Pod")
expectNoError(framework.WaitForPodRunningSlow(host1Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))
v, err := framework.ReadFileViaContainer(host1Pod.Name, containerName, testFile) v, err := f.ReadFileViaContainer(host1Pod.Name, containerName, testFile)
expectNoError(err) framework.ExpectNoError(err)
Logf("Read value: %v", v) framework.Logf("Read value: %v", v)
Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents))) Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))
By("deleting host1Pod") By("deleting host1Pod")
expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod") framework.ExpectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
return return
}) })
It("should schedule a pod w/ a readonly PD on two hosts, then remove both. [Slow]", func() { It("should schedule a pod w/ a readonly PD on two hosts, then remove both. [Slow]", func() {
SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
By("creating PD") By("creating PD")
diskName, err := createPDWithRetry() diskName, err := createPDWithRetry()
expectNoError(err, "Error creating PD") framework.ExpectNoError(err, "Error creating PD")
rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */) rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
host0ROPod := testPDPod([]string{diskName}, host0Name, true /* readOnly */, 1 /* numContainers */) host0ROPod := testPDPod([]string{diskName}, host0Name, true /* readOnly */, 1 /* numContainers */)
@ -143,36 +144,36 @@ var _ = KubeDescribe("Pod Disks", func() {
By("submitting rwPod to ensure PD is formatted") By("submitting rwPod to ensure PD is formatted")
_, err = podClient.Create(rwPod) _, err = podClient.Create(rwPod)
expectNoError(err, "Failed to create rwPod") framework.ExpectNoError(err, "Failed to create rwPod")
expectNoError(framework.WaitForPodRunningSlow(rwPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
expectNoError(waitForPDDetach(diskName, host0Name)) framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes") By("submitting host0ROPod to kubernetes")
_, err = podClient.Create(host0ROPod) _, err = podClient.Create(host0ROPod)
expectNoError(err, "Failed to create host0ROPod") framework.ExpectNoError(err, "Failed to create host0ROPod")
By("submitting host1ROPod to kubernetes") By("submitting host1ROPod to kubernetes")
_, err = podClient.Create(host1ROPod) _, err = podClient.Create(host1ROPod)
expectNoError(err, "Failed to create host1ROPod") framework.ExpectNoError(err, "Failed to create host1ROPod")
expectNoError(framework.WaitForPodRunningSlow(host0ROPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0ROPod.Name))
expectNoError(framework.WaitForPodRunningSlow(host1ROPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod") By("deleting host0ROPod")
expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod") framework.ExpectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod") By("deleting host1ROPod")
expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod") framework.ExpectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
}) })
It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession [Slow]", func() { It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession [Slow]", func() {
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD") By("creating PD")
diskName, err := createPDWithRetry() diskName, err := createPDWithRetry()
expectNoError(err, "Error creating PD") framework.ExpectNoError(err, "Error creating PD")
numContainers := 4 numContainers := 4
host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, numContainers) host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, numContainers)
@ -187,43 +188,43 @@ var _ = KubeDescribe("Pod Disks", func() {
fileAndContentToVerify := make(map[string]string) fileAndContentToVerify := make(map[string]string)
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
Logf("PD Read/Writer Iteration #%v", i) framework.Logf("PD Read/Writer Iteration #%v", i)
By("submitting host0Pod to kubernetes") By("submitting host0Pod to kubernetes")
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
// randomly select a container and read/verify pd contents from it // randomly select a container and read/verify pd contents from it
containerName := fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) containerName := fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
// Randomly select a container to write a file to PD from // Randomly select a container to write a file to PD from
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
testFile := fmt.Sprintf("/testpd1/tracker%v", i) testFile := fmt.Sprintf("/testpd1/tracker%v", i)
testFileContents := fmt.Sprintf("%v", mathrand.Int()) testFileContents := fmt.Sprintf("%v", mathrand.Int())
fileAndContentToVerify[testFile] = testFileContents fileAndContentToVerify[testFile] = testFileContents
expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName) framework.Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName)
// Randomly select a container and read/verify pd contents from it // Randomly select a container and read/verify pd contents from it
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1) containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod") By("deleting host0Pod")
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
} }
}) })
It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession [Slow]", func() { It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession [Slow]", func() {
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD1") By("creating PD1")
disk1Name, err := createPDWithRetry() disk1Name, err := createPDWithRetry()
expectNoError(err, "Error creating PD1") framework.ExpectNoError(err, "Error creating PD1")
By("creating PD2") By("creating PD2")
disk2Name, err := createPDWithRetry() disk2Name, err := createPDWithRetry()
expectNoError(err, "Error creating PD2") framework.ExpectNoError(err, "Error creating PD2")
host0Pod := testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */) host0Pod := testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */)
@ -239,15 +240,15 @@ var _ = KubeDescribe("Pod Disks", func() {
containerName := "mycontainer" containerName := "mycontainer"
fileAndContentToVerify := make(map[string]string) fileAndContentToVerify := make(map[string]string)
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
Logf("PD Read/Writer Iteration #%v", i) framework.Logf("PD Read/Writer Iteration #%v", i)
By("submitting host0Pod to kubernetes") By("submitting host0Pod to kubernetes")
_, err = podClient.Create(host0Pod) _, err = podClient.Create(host0Pod)
expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
expectNoError(framework.WaitForPodRunningSlow(host0Pod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
// Read/verify pd contents for both disks from container // Read/verify pd contents for both disks from container
verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
// Write a file to both PDs from container // Write a file to both PDs from container
testFilePD1 := fmt.Sprintf("/testpd1/tracker%v", i) testFilePD1 := fmt.Sprintf("/testpd1/tracker%v", i)
@ -256,16 +257,16 @@ var _ = KubeDescribe("Pod Disks", func() {
testFilePD2Contents := fmt.Sprintf("%v", mathrand.Int()) testFilePD2Contents := fmt.Sprintf("%v", mathrand.Int())
fileAndContentToVerify[testFilePD1] = testFilePD1Contents fileAndContentToVerify[testFilePD1] = testFilePD1Contents
fileAndContentToVerify[testFilePD2] = testFilePD2Contents fileAndContentToVerify[testFilePD2] = testFilePD2Contents
expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, testFilePD1Contents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, testFilePD1Contents))
Logf("Wrote value: \"%v\" to PD1 (%q) from pod %q container %q", testFilePD1Contents, disk1Name, host0Pod.Name, containerName) framework.Logf("Wrote value: \"%v\" to PD1 (%q) from pod %q container %q", testFilePD1Contents, disk1Name, host0Pod.Name, containerName)
expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD2, testFilePD2Contents)) framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD2, testFilePD2Contents))
Logf("Wrote value: \"%v\" to PD2 (%q) from pod %q container %q", testFilePD2Contents, disk2Name, host0Pod.Name, containerName) framework.Logf("Wrote value: \"%v\" to PD2 (%q) from pod %q container %q", testFilePD2Contents, disk2Name, host0Pod.Name, containerName)
// Read/verify pd contents for both disks from container // Read/verify pd contents for both disks from container
verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod") By("deleting host0Pod")
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
} }
}) })
}) })
@ -275,10 +276,10 @@ func createPDWithRetry() (string, error) {
var err error var err error
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
if newDiskName, err = createPD(); err != nil { if newDiskName, err = createPD(); err != nil {
Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err) framework.Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
continue continue
} }
Logf("Successfully created a new PD: %q.", newDiskName) framework.Logf("Successfully created a new PD: %q.", newDiskName)
break break
} }
return newDiskName, err return newDiskName, err
@ -288,30 +289,30 @@ func deletePDWithRetry(diskName string) {
var err error var err error
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
if err = deletePD(diskName); err != nil { if err = deletePD(diskName); err != nil {
Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err) framework.Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
continue continue
} }
Logf("Successfully deleted PD %q.", diskName) framework.Logf("Successfully deleted PD %q.", diskName)
break break
} }
expectNoError(err, "Error deleting PD") framework.ExpectNoError(err, "Error deleting PD")
} }
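createPDWithRetry and deletePDWithRetry share the same deadline-and-sleep retry idiom; factored out as a hypothetical helper (a sketch, not part of this commit), it would look like:

// retryFor keeps calling op until it succeeds, sleeping between attempts, and
// gives up once the deadline passes, returning the last error. Callers are
// expected to pass a positive deadline. (Hypothetical helper, for illustration.)
func retryFor(deadline, sleep time.Duration, op func() error) error {
	var err error
	for start := time.Now(); time.Since(start) < deadline; time.Sleep(sleep) {
		if err = op(); err == nil {
			break
		}
	}
	return err
}

deletePDWithRetry above would then reduce to framework.ExpectNoError(retryFor(180*time.Second, 5*time.Second, func() error { return deletePD(diskName) }), "Error deleting PD").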
func verifyPDContentsViaContainer(f *Framework, podName, containerName string, fileAndContentToVerify map[string]string) { func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
for filePath, expectedContents := range fileAndContentToVerify { for filePath, expectedContents := range fileAndContentToVerify {
v, err := f.ReadFileViaContainer(podName, containerName, filePath) v, err := f.ReadFileViaContainer(podName, containerName, filePath)
if err != nil { if err != nil {
Logf("Error reading file: %v", err) framework.Logf("Error reading file: %v", err)
} }
expectNoError(err) framework.ExpectNoError(err)
Logf("Read file %q with content: %v", filePath, v) framework.Logf("Read file %q with content: %v", filePath, v)
Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(expectedContents))) Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(expectedContents)))
} }
} }
func createPD() (string, error) { func createPD() (string, error) {
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID())) pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(util.NewUUID()))
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
if err != nil { if err != nil {
@ -319,12 +320,12 @@ func createPD() (string, error) {
} }
tags := map[string]string{} tags := map[string]string{}
err = gceCloud.CreateDisk(pdName, testContext.CloudConfig.Zone, 10 /* sizeGb */, tags) err = gceCloud.CreateDisk(pdName, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
if err != nil { if err != nil {
return "", err return "", err
} }
return pdName, nil return pdName, nil
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
client := ec2.New(session.New()) client := ec2.New(session.New())
request := &ec2.CreateVolumeInput{} request := &ec2.CreateVolumeInput{}
@ -347,7 +348,7 @@ func createPD() (string, error) {
} }
func deletePD(pdName string) error { func deletePD(pdName string) error {
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
if err != nil { if err != nil {
return err return err
@ -361,10 +362,10 @@ func deletePD(pdName string) error {
return nil return nil
} }
Logf("Error deleting PD %q: %v", pdName, err) framework.Logf("Error deleting PD %q: %v", pdName, err)
} }
return err return err
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
client := ec2.New(session.New()) client := ec2.New(session.New())
tokens := strings.Split(pdName, "/") tokens := strings.Split(pdName, "/")
@ -374,7 +375,7 @@ func deletePD(pdName string) error {
_, err := client.DeleteVolume(request) _, err := client.DeleteVolume(request)
if err != nil { if err != nil {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName) framework.Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else { } else {
return fmt.Errorf("error deleting EBS volumes: %v", err) return fmt.Errorf("error deleting EBS volumes: %v", err)
} }
@ -386,7 +387,7 @@ func deletePD(pdName string) error {
} }
func detachPD(hostName, pdName string) error { func detachPD(hostName, pdName string) error {
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
instanceName := strings.Split(hostName, ".")[0] instanceName := strings.Split(hostName, ".")[0]
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
@ -401,11 +402,11 @@ func detachPD(hostName, pdName string) error {
return nil return nil
} }
Logf("Error detaching PD %q: %v", pdName, err) framework.Logf("Error detaching PD %q: %v", pdName, err)
} }
return err return err
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
client := ec2.New(session.New()) client := ec2.New(session.New())
tokens := strings.Split(pdName, "/") tokens := strings.Split(pdName, "/")
@ -462,7 +463,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
}, },
} }
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
pod.Spec.Volumes = make([]api.Volume, len(diskNames)) pod.Spec.Volumes = make([]api.Volume, len(diskNames))
for k, diskName := range diskNames { for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
@ -474,7 +475,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
}, },
} }
} }
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
pod.Spec.Volumes = make([]api.Volume, len(diskNames)) pod.Spec.Volumes = make([]api.Volume, len(diskNames))
for k, diskName := range diskNames { for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
@ -487,7 +488,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
} }
} }
} else { } else {
panic("Unknown provider: " + testContext.Provider) panic("Unknown provider: " + framework.TestContext.Provider)
} }
return pod return pod
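The per-provider volume wiring elided from the hunks above presumably builds an api.Volume with the matching volume source; for the GCE/GKE branch it would look roughly like the following (the FSType value is an assumption and is not shown in this commit):

// gcePDVolume sketches the GCE/GKE branch of testPDPod's volume wiring.
func gcePDVolume(index int, diskName string, readOnly bool) api.Volume {
	return api.Volume{
		Name: fmt.Sprintf("testpd%v", index+1),
		VolumeSource: api.VolumeSource{
			GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
				PDName:   diskName,
				FSType:   "ext4", // assumed value
				ReadOnly: readOnly,
			},
		},
	}
}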
@ -495,7 +496,7 @@ func testPDPod(diskNames []string, targetHost string, readOnly bool, numContaine
// Waits for the specified PD to detach from the specified hostName // Waits for the specified PD to detach from the specified hostName
func waitForPDDetach(diskName, hostName string) error { func waitForPDDetach(diskName, hostName string) error {
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud() gceCloud, err := getGCECloud()
if err != nil { if err != nil {
return err return err
@ -504,17 +505,17 @@ func waitForPDDetach(diskName, hostName string) error {
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) { for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName) diskAttached, err := gceCloud.DiskIsAttached(diskName, hostName)
if err != nil { if err != nil {
Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err) framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, hostName, err)
return err return err
} }
if !diskAttached { if !diskAttached {
// Specified disk does not appear to be attached to specified node // Specified disk does not appear to be attached to specified node
Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName) framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, hostName)
return nil return nil
} }
Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName) framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, hostName)
} }
return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, hostName, gcePDDetachTimeout) return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, hostName, gcePDDetachTimeout)
@ -524,10 +525,10 @@ func waitForPDDetach(diskName, hostName string) error {
} }
func getGCECloud() (*gcecloud.GCECloud, error) { func getGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := testContext.CloudConfig.Provider.(*gcecloud.GCECloud) gceCloud, ok := framework.TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok { if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", testContext.CloudConfig.Provider) return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", framework.TestContext.CloudConfig.Provider)
} }
return gceCloud, nil return gceCloud, nil
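
The hunks above are one mechanical substitution repeated many times: helpers that used to be package-local in test/e2e (Logf, Failf, testContext, ...) are now reached through the imported test/e2e/framework package as framework.Logf, framework.Failf and framework.TestContext. A minimal sketch of the post-refactoring style; the helper name ensureSupportedProvider is hypothetical and exists only to keep the example self-contained:

package e2e

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// ensureSupportedProvider is a hypothetical helper illustrating the pattern
// applied throughout this commit: configuration is read from
// framework.TestContext and logging/failing goes through framework.Logf and
// framework.Failf instead of the old package-local names.
func ensureSupportedProvider(supported ...string) error {
	for _, p := range supported {
		if framework.TestContext.Provider == p {
			framework.Logf("provider %q is supported by this test", p)
			return nil
		}
	}
	return fmt.Errorf("unsupported provider: %q", framework.TestContext.Provider)
}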

View File

@ -26,18 +26,19 @@ import (
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
) )
// This test needs privileged containers, which are disabled by default. Run // This test needs privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]" // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = KubeDescribe("PersistentVolumes [Feature:Volumes]", func() { var _ = framework.KubeDescribe("PersistentVolumes [Feature:Volumes]", func() {
framework := NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
}) })
It("NFS volume can be created, bound, retrieved, unbound, and used by a pod", func() { It("NFS volume can be created, bound, retrieved, unbound, and used by a pod", func() {
@ -54,47 +55,47 @@ var _ = KubeDescribe("PersistentVolumes [Feature:Volumes]", func() {
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("NFS server IP address: %v", serverIP) framework.Logf("NFS server IP address: %v", serverIP)
pv := makePersistentVolume(serverIP) pv := makePersistentVolume(serverIP)
pvc := makePersistentVolumeClaim(ns) pvc := makePersistentVolumeClaim(ns)
Logf("Creating PersistentVolume using NFS") framework.Logf("Creating PersistentVolume using NFS")
pv, err := c.PersistentVolumes().Create(pv) pv, err := c.PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("Creating PersistentVolumeClaim") framework.Logf("Creating PersistentVolumeClaim")
pvc, err = c.PersistentVolumeClaims(ns).Create(pvc) pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// allow the binder a chance to catch up. should not be more than 20s. // allow the binder a chance to catch up. should not be more than 20s.
waitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second) framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second)
pv, err = c.PersistentVolumes().Get(pv.Name) pv, err = c.PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if pv.Spec.ClaimRef == nil { if pv.Spec.ClaimRef == nil {
Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv) framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv)
} }
Logf("Deleting PersistentVolumeClaim to trigger PV Recycling") framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
err = c.PersistentVolumeClaims(ns).Delete(pvc.Name) err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// allow the recycler a chance to catch up. it has to perform NFS scrub, which can be slow in e2e. // allow the recycler a chance to catch up. it has to perform NFS scrub, which can be slow in e2e.
waitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second) framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)
pv, err = c.PersistentVolumes().Get(pv.Name) pv, err = c.PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if pv.Spec.ClaimRef != nil { if pv.Spec.ClaimRef != nil {
Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv) framework.Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
} }
// The NFS Server pod we're using contains an index.html file // The NFS Server pod we're using contains an index.html file
// Verify the file was really scrubbed from the volume // Verify the file was really scrubbed from the volume
podTemplate := makeCheckPod(ns, serverIP) podTemplate := makeCheckPod(ns, serverIP)
checkpod, err := c.Pods(ns).Create(podTemplate) checkpod, err := c.Pods(ns).Create(podTemplate)
expectNoError(err, "Failed to create checker pod: %v", err) framework.ExpectNoError(err, "Failed to create checker pod: %v", err)
err = waitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace) err = framework.WaitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
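
The NFS test above waits on framework.WaitForPersistentVolumePhase for both the bind and the recycle step. Conceptually that helper is a poll loop over the PV's status phase; the sketch below shows the idea only — it is not the framework's actual implementation, and waitForPVPhaseSketch is a made-up name:

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPVPhaseSketch polls a PersistentVolume until it reaches the requested
// phase or the timeout expires. Illustrative only; the real helper is
// framework.WaitForPersistentVolumePhase.
func waitForPVPhaseSketch(phase api.PersistentVolumePhase, c *client.Client, pvName string, poll, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err != nil {
			framework.Logf("error getting PersistentVolume %q: %v", pvName, err)
			continue
		}
		if pv.Status.Phase == phase {
			framework.Logf("PersistentVolume %q is in phase %q", pvName, phase)
			return nil
		}
		framework.Logf("waiting for PersistentVolume %q: phase is %q, want %q", pvName, pv.Status.Phase, phase)
	}
	return fmt.Errorf("gave up waiting for PersistentVolume %q to reach phase %q after %v", pvName, phase, timeout)
}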

View File

@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -54,7 +55,7 @@ var (
func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int, timeout time.Duration) { func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int, timeout time.Duration) {
By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns)) By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
_, err := c.Pods(ns).Create(podDescr) _, err := c.Pods(ns).Create(podDescr)
expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { defer func() {
@ -65,16 +66,16 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
// Wait until the pod is not pending. (Here we need to check for something other than // Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending' rather than checking for 'Running', since when failures occur, we go to // 'Pending' rather than checking for 'Running', since when failures occur, we go to
// 'Terminated' which can cause indefinite blocking.) // 'Terminated' which can cause indefinite blocking.)
expectNoError(waitForPodNotPending(c, ns, podDescr.Name), framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podDescr.Name),
fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns)) fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
Logf("Started pod %s in namespace %s", podDescr.Name, ns) framework.Logf("Started pod %s in namespace %s", podDescr.Name, ns)
// Check the pod's current state and verify that restartCount is present. // Check the pod's current state and verify that restartCount is present.
By("checking the pod's current state and verifying that restartCount is present") By("checking the pod's current state and verifying that restartCount is present")
pod, err := c.Pods(ns).Get(podDescr.Name) pod, err := c.Pods(ns).Get(podDescr.Name)
expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount) framework.Logf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount)
// Wait for the restart state to be as desired. // Wait for the restart state to be as desired.
deadline := time.Now().Add(timeout) deadline := time.Now().Add(timeout)
@ -82,13 +83,13 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
observedRestarts := 0 observedRestarts := 0
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = c.Pods(ns).Get(podDescr.Name) pod, err = c.Pods(ns).Get(podDescr.Name)
expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
if restartCount != lastRestartCount { if restartCount != lastRestartCount {
Logf("Restart count of pod %s/%s is now %d (%v elapsed)", framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, podDescr.Name, restartCount, time.Since(start)) ns, podDescr.Name, restartCount, time.Since(start))
if restartCount < lastRestartCount { if restartCount < lastRestartCount {
Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d", framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
ns, podDescr.Name, lastRestartCount, restartCount) ns, podDescr.Name, lastRestartCount, restartCount)
} }
} }
@ -104,7 +105,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
// If we expected n restarts (n > 0), fail if we observed < n restarts. // If we expected n restarts (n > 0), fail if we observed < n restarts.
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 && if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
observedRestarts < expectNumRestarts) { observedRestarts < expectNumRestarts) {
Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t", framework.Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t",
ns, podDescr.Name, expectNumRestarts, observedRestarts) ns, podDescr.Name, expectNumRestarts, observedRestarts)
} }
} }
@ -115,12 +116,12 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) {
By("creating pod") By("creating pod")
defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
By("ensuring that pod is running and has a hostIP") By("ensuring that pod is running and has a hostIP")
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test. // are running so non-running pods cause a timeout for this test.
err := waitForPodRunningInNamespace(c, pod.Name, ns) err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Try to make sure we get a hostIP for each pod. // Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute hostIPTimeout := 2 * time.Minute
@ -129,56 +130,56 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) {
p, err := podClient.Get(pod.Name) p, err := podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if p.Status.HostIP != "" { if p.Status.HostIP != "" {
Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break break
} }
if time.Since(t) >= hostIPTimeout { if time.Since(t) >= hostIPTimeout {
Failf("Gave up waiting for hostIP of pod %s after %v seconds", framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
p.Name, time.Since(t).Seconds()) p.Name, time.Since(t).Seconds())
} }
Logf("Retrying to get the hostIP of pod %s", p.Name) framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
} }
} }
func runPodFromStruct(framework *Framework, pod *api.Pod) { func runPodFromStruct(f *framework.Framework, pod *api.Pod) {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
pod, err := podClient.Create(pod) pod, err := podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
pod, err = podClient.Get(pod.Name) pod, err = podClient.Get(pod.Name)
if err != nil { if err != nil {
Failf("failed to get pod: %v", err) framework.Failf("failed to get pod: %v", err)
} }
} }
func startPodAndGetBackOffs(framework *Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) { func startPodAndGetBackOffs(f *framework.Framework, pod *api.Pod, podName string, containerName string, sleepAmount time.Duration) (time.Duration, time.Duration) {
runPodFromStruct(framework, pod) runPodFromStruct(f, pod)
time.Sleep(sleepAmount) time.Sleep(sleepAmount)
By("getting restart delay-0") By("getting restart delay-0")
_, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) _, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
By("getting restart delay-1") By("getting restart delay-1")
delay1, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) delay1, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
By("getting restart delay-2") By("getting restart delay-2")
delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
return delay1, delay2 return delay1, delay2
} }
@ -188,29 +189,29 @@ func getRestartDelay(c *client.Client, pod *api.Pod, ns string, name string, con
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second) time.Sleep(time.Second)
pod, err := c.Pods(ns).Get(name) pod, err := c.Pods(ns).Get(name)
expectNoError(err, fmt.Sprintf("getting pod %s", name)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", name))
status, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, containerName) status, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok { if !ok {
Logf("getRestartDelay: status missing") framework.Logf("getRestartDelay: status missing")
continue continue
} }
if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) { if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
startedAt := status.State.Running.StartedAt.Time startedAt := status.State.Running.StartedAt.Time
finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt)) framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
return startedAt.Sub(finishedAt), nil return startedAt.Sub(finishedAt), nil
} }
} }
return 0, fmt.Errorf("timeout getting pod restart delay") return 0, fmt.Errorf("timeout getting pod restart delay")
} }
var _ = KubeDescribe("Pods", func() { var _ = framework.KubeDescribe("Pods", func() {
framework := NewDefaultFramework("pods") f := framework.NewDefaultFramework("pods")
It("should get a host IP [Conformance]", func() { It("should get a host IP [Conformance]", func() {
name := "pod-hostip-" + string(util.NewUUID()) name := "pod-hostip-" + string(util.NewUUID())
testHostIP(framework.Client, framework.Namespace.Name, &api.Pod{ testHostIP(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
}, },
@ -226,7 +227,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should be schedule with cpu and memory limits [Conformance]", func() { It("should be schedule with cpu and memory limits [Conformance]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-update-" + string(util.NewUUID()) name := "pod-update-" + string(util.NewUUID())
@ -257,13 +258,13 @@ var _ = KubeDescribe("Pods", func() {
defer podClient.Delete(pod.Name, nil) defer podClient.Delete(pod.Name, nil)
_, err := podClient.Create(pod) _, err := podClient.Create(pod)
if err != nil { if err != nil {
Failf("Error creating a pod: %v", err) framework.Failf("Error creating a pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
}) })
It("should be submitted and removed [Conformance]", func() { It("should be submitted and removed [Conformance]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-update-" + string(util.NewUUID()) name := "pod-update-" + string(util.NewUUID())
@ -301,7 +302,7 @@ var _ = KubeDescribe("Pods", func() {
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
options = api.ListOptions{ options = api.ListOptions{
@ -310,7 +311,7 @@ var _ = KubeDescribe("Pods", func() {
} }
w, err := podClient.Watch(options) w, err := podClient.Watch(options)
if err != nil { if err != nil {
Failf("Failed to set up watch: %v", err) framework.Failf("Failed to set up watch: %v", err)
} }
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
@ -320,7 +321,7 @@ var _ = KubeDescribe("Pods", func() {
defer podClient.Delete(pod.Name, api.NewDeleteOptions(0)) defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
_, err = podClient.Create(pod) _, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
@ -328,7 +329,7 @@ var _ = KubeDescribe("Pods", func() {
options = api.ListOptions{LabelSelector: selector} options = api.ListOptions{LabelSelector: selector}
pods, err = podClient.List(options) pods, err = podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -336,27 +337,27 @@ var _ = KubeDescribe("Pods", func() {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type != watch.Added { if event.Type != watch.Added {
Failf("Failed to observe pod creation: %v", event) framework.Failf("Failed to observe pod creation: %v", event)
} }
case <-time.After(podStartTimeout): case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation") Fail("Timeout while waiting for pod creation")
} }
// We need to wait for the pod to be scheduled, otherwise the deletion // We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully. // will be carried out immediately rather than gracefully.
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("deleting the pod gracefully") By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil { if err := podClient.Delete(pod.Name, api.NewDeleteOptions(30)); err != nil {
Failf("Failed to delete pod: %v", err) framework.Failf("Failed to delete pod: %v", err)
} }
By("verifying the kubelet observed the termination notice") By("verifying the kubelet observed the termination notice")
pod, err = podClient.Get(pod.Name) pod, err = podClient.Get(pod.Name)
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := GetKubeletPods(framework.Client, pod.Spec.NodeName) podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName)
if err != nil { if err != nil {
Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil return false, nil
} }
for _, kubeletPod := range podList.Items { for _, kubeletPod := range podList.Items {
@ -364,12 +365,12 @@ var _ = KubeDescribe("Pods", func() {
continue continue
} }
if kubeletPod.ObjectMeta.DeletionTimestamp == nil { if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
Logf("deletion has not yet been observed") framework.Logf("deletion has not yet been observed")
return false, nil return false, nil
} }
return true, nil return true, nil
} }
Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed") framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice") })).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
@ -406,7 +407,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should be updated [Conformance]", func() { It("should be updated [Conformance]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-update-" + string(util.NewUUID()) name := "pod-update-" + string(util.NewUUID())
@ -446,10 +447,10 @@ var _ = KubeDescribe("Pods", func() {
}() }()
pod, err := podClient.Create(pod) pod, err := podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@ -458,7 +459,7 @@ var _ = KubeDescribe("Pods", func() {
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
// Standard get, update retry loop // Standard get, update retry loop
expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
By("updating the pod") By("updating the pod")
value = strconv.Itoa(time.Now().Nanosecond()) value = strconv.Itoa(time.Now().Nanosecond())
if pod == nil { // on retries we need to re-get if pod == nil { // on retries we need to re-get
@ -470,29 +471,29 @@ var _ = KubeDescribe("Pods", func() {
pod.Labels["time"] = value pod.Labels["time"] = value
pod, err = podClient.Update(pod) pod, err = podClient.Update(pod)
if err == nil { if err == nil {
Logf("Successfully updated pod") framework.Logf("Successfully updated pod")
return true, nil return true, nil
} }
if errors.IsConflict(err) { if errors.IsConflict(err) {
Logf("Conflicting update to pod, re-get and re-update: %v", err) framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
pod = nil // re-get it when we retry pod = nil // re-get it when we retry
return false, nil return false, nil
} }
return false, fmt.Errorf("failed to update pod: %v", err) return false, fmt.Errorf("failed to update pod: %v", err)
})) }))
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the updated pod is in kubernetes") By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = api.ListOptions{LabelSelector: selector}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
Logf("Pod update OK") framework.Logf("Pod update OK")
}) })
It("should allow activeDeadlineSeconds to be updated [Conformance]", func() { It("should allow activeDeadlineSeconds to be updated [Conformance]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(util.NewUUID()) name := "pod-update-activedeadlineseconds-" + string(util.NewUUID())
@ -532,10 +533,10 @@ var _ = KubeDescribe("Pods", func() {
}() }()
pod, err := podClient.Create(pod) pod, err := podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@ -544,7 +545,7 @@ var _ = KubeDescribe("Pods", func() {
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
// Standard get, update retry loop // Standard get, update retry loop
expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
By("updating the pod") By("updating the pod")
value = strconv.Itoa(time.Now().Nanosecond()) value = strconv.Itoa(time.Now().Nanosecond())
if pod == nil { // on retries we need to re-get if pod == nil { // on retries we need to re-get
@ -557,18 +558,18 @@ var _ = KubeDescribe("Pods", func() {
pod.Spec.ActiveDeadlineSeconds = &newDeadline pod.Spec.ActiveDeadlineSeconds = &newDeadline
pod, err = podClient.Update(pod) pod, err = podClient.Update(pod)
if err == nil { if err == nil {
Logf("Successfully updated pod") framework.Logf("Successfully updated pod")
return true, nil return true, nil
} }
if errors.IsConflict(err) { if errors.IsConflict(err) {
Logf("Conflicting update to pod, re-get and re-update: %v", err) framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
pod = nil // re-get it when we retry pod = nil // re-get it when we retry
return false, nil return false, nil
} }
return false, fmt.Errorf("failed to update pod: %v", err) return false, fmt.Errorf("failed to update pod: %v", err)
})) }))
expectNoError(framework.WaitForPodTerminated(pod.Name, "DeadlineExceeded")) framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
}) })
It("should contain environment variables for services [Conformance]", func() { It("should contain environment variables for services [Conformance]", func() {
@ -590,12 +591,12 @@ var _ = KubeDescribe("Pods", func() {
}, },
}, },
} }
defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0)) defer f.Client.Pods(f.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
_, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod) _, err := f.Client.Pods(f.Namespace.Name).Create(serverPod)
if err != nil { if err != nil {
Failf("Failed to create serverPod: %v", err) framework.Failf("Failed to create serverPod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(serverPod.Name)) framework.ExpectNoError(f.WaitForPodRunning(serverPod.Name))
// This service exposes port 8080 of the test pod as a service on port 8765 // This service exposes port 8080 of the test pod as a service on port 8765
// TODO(filbranden): We would like to use a unique service name such as: // TODO(filbranden): We would like to use a unique service name such as:
@ -622,10 +623,10 @@ var _ = KubeDescribe("Pods", func() {
}, },
}, },
} }
defer framework.Client.Services(framework.Namespace.Name).Delete(svc.Name) defer f.Client.Services(f.Namespace.Name).Delete(svc.Name)
_, err = framework.Client.Services(framework.Namespace.Name).Create(svc) _, err = f.Client.Services(f.Namespace.Name).Create(svc)
if err != nil { if err != nil {
Failf("Failed to create service: %v", err) framework.Failf("Failed to create service: %v", err)
} }
// Make a client pod that verifies that it has the service environment variables. // Make a client pod that verifies that it has the service environment variables.
@ -647,7 +648,7 @@ var _ = KubeDescribe("Pods", func() {
}, },
} }
framework.TestContainerOutput("service env", pod, 0, []string{ f.TestContainerOutput("service env", pod, 0, []string{
"FOOSERVICE_SERVICE_HOST=", "FOOSERVICE_SERVICE_HOST=",
"FOOSERVICE_SERVICE_PORT=", "FOOSERVICE_SERVICE_PORT=",
"FOOSERVICE_PORT=", "FOOSERVICE_PORT=",
@ -659,7 +660,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() { It("should be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "liveness-exec", Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
@ -686,7 +687,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should *not* be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() { It("should *not* be restarted with a docker exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "liveness-exec", Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
@ -713,7 +714,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should be restarted with a /healthz http liveness probe [Conformance]", func() { It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
@ -742,7 +743,7 @@ var _ = KubeDescribe("Pods", func() {
// Slow by design (5 min) // Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() { It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
@ -770,7 +771,7 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() { It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(framework.Client, framework.Namespace.Name, &api.Pod{ runLivenessTest(f.Client, f.Namespace.Name, &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
@ -785,7 +786,7 @@ var _ = KubeDescribe("Pods", func() {
Args: []string{ Args: []string{
"-service=liveness-http", "-service=liveness-http",
"-peers=1", "-peers=1",
"-namespace=" + framework.Namespace.Name}, "-namespace=" + f.Namespace.Name},
Ports: []api.ContainerPort{{ContainerPort: 8080}}, Ports: []api.ContainerPort{{ContainerPort: 8080}},
LivenessProbe: &api.Probe{ LivenessProbe: &api.Probe{
Handler: api.Handler{ Handler: api.Handler{
@ -805,11 +806,11 @@ var _ = KubeDescribe("Pods", func() {
}) })
It("should support remote command execution over websockets", func() { It("should support remote command execution over websockets", func() {
config, err := loadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
Failf("Unable to get base config: %v", err) framework.Failf("Unable to get base config: %v", err)
} }
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-exec-websocket-" + string(util.NewUUID()) name := "pod-exec-websocket-" + string(util.NewUUID())
@ -835,13 +836,13 @@ var _ = KubeDescribe("Pods", func() {
}() }()
pod, err = podClient.Create(pod) pod, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
req := framework.Client.Get(). req := f.Client.Get().
Namespace(framework.Namespace.Name). Namespace(f.Namespace.Name).
Resource("pods"). Resource("pods").
Name(pod.Name). Name(pod.Name).
Suffix("exec"). Suffix("exec").
@ -852,9 +853,9 @@ var _ = KubeDescribe("Pods", func() {
Param("command", "/etc/resolv.conf") Param("command", "/etc/resolv.conf")
url := req.URL() url := req.URL()
ws, err := OpenWebSocketForURL(url, config, []string{"channel.k8s.io"}) ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
if err != nil { if err != nil {
Failf("Failed to open websocket to %s: %v", url.String(), err) framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
} }
defer ws.Close() defer ws.Close()
@ -865,30 +866,30 @@ var _ = KubeDescribe("Pods", func() {
if err == io.EOF { if err == io.EOF {
break break
} }
Failf("Failed to read completely from websocket %s: %v", url.String(), err) framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
} }
if len(msg) == 0 { if len(msg) == 0 {
continue continue
} }
if msg[0] != 1 { if msg[0] != 1 {
Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg) framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
} }
buf.Write(msg[1:]) buf.Write(msg[1:])
} }
if buf.Len() == 0 { if buf.Len() == 0 {
Failf("Unexpected output from server") framework.Failf("Unexpected output from server")
} }
if !strings.Contains(buf.String(), "nameserver") { if !strings.Contains(buf.String(), "nameserver") {
Failf("Expected to find 'nameserver' in %q", buf.String()) framework.Failf("Expected to find 'nameserver' in %q", buf.String())
} }
}) })
It("should support retrieving logs from the container over websockets", func() { It("should support retrieving logs from the container over websockets", func() {
config, err := loadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
Failf("Unable to get base config: %v", err) framework.Failf("Unable to get base config: %v", err)
} }
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-logs-websocket-" + string(util.NewUUID()) name := "pod-logs-websocket-" + string(util.NewUUID())
@ -914,13 +915,13 @@ var _ = KubeDescribe("Pods", func() {
}() }()
pod, err = podClient.Create(pod) pod, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
req := framework.Client.Get(). req := f.Client.Get().
Namespace(framework.Namespace.Name). Namespace(f.Namespace.Name).
Resource("pods"). Resource("pods").
Name(pod.Name). Name(pod.Name).
Suffix("log"). Suffix("log").
@ -928,9 +929,9 @@ var _ = KubeDescribe("Pods", func() {
url := req.URL() url := req.URL()
ws, err := OpenWebSocketForURL(url, config, []string{"binary.k8s.io"}) ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
if err != nil { if err != nil {
Failf("Failed to open websocket to %s: %v", url.String(), err) framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
} }
defer ws.Close() defer ws.Close()
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
@ -940,7 +941,7 @@ var _ = KubeDescribe("Pods", func() {
if err == io.EOF { if err == io.EOF {
break break
} }
Failf("Failed to read completely from websocket %s: %v", url.String(), err) framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
} }
if len(msg) == 0 { if len(msg) == 0 {
continue continue
@ -948,14 +949,14 @@ var _ = KubeDescribe("Pods", func() {
buf.Write(msg) buf.Write(msg)
} }
if buf.String() != "container is alive\n" { if buf.String() != "container is alive\n" {
Failf("Unexpected websocket logs:\n%s", buf.String()) framework.Failf("Unexpected websocket logs:\n%s", buf.String())
} }
}) })
It("should have their auto-restart back-off timer reset on image update [Slow]", func() { It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
podName := "pod-back-off-image" podName := "pod-back-off-image"
containerName := "back-off" containerName := "back-off"
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: podName, Name: podName,
@ -977,35 +978,35 @@ var _ = KubeDescribe("Pods", func() {
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}() }()
delay1, delay2 := startPodAndGetBackOffs(framework, pod, podName, containerName, buildBackOffDuration) delay1, delay2 := startPodAndGetBackOffs(f, pod, podName, containerName, buildBackOffDuration)
By("updating the image") By("updating the image")
pod, err := podClient.Get(pod.Name) pod, err := podClient.Get(pod.Name)
if err != nil { if err != nil {
Failf("failed to get pod: %v", err) framework.Failf("failed to get pod: %v", err)
} }
pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx:1.7.9" pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx:1.7.9"
pod, err = podClient.Update(pod) pod, err = podClient.Update(pod)
if err != nil { if err != nil {
Failf("error updating pod=%s/%s %v", podName, containerName, err) framework.Failf("error updating pod=%s/%s %v", podName, containerName, err)
} }
time.Sleep(syncLoopFrequency) time.Sleep(syncLoopFrequency)
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("get restart delay after image update") By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) delayAfterUpdate, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 { if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2) framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
} }
}) })
// Slow issue #19027 (20 mins) // Slow issue #19027 (20 mins)
It("should cap back-off at MaxContainerBackOff [Slow]", func() { It("should cap back-off at MaxContainerBackOff [Slow]", func() {
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
podName := "back-off-cap" podName := "back-off-cap"
containerName := "back-off-cap" containerName := "back-off-cap"
pod := &api.Pod{ pod := &api.Pod{
@ -1029,7 +1030,7 @@ var _ = KubeDescribe("Pods", func() {
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}() }()
runPodFromStruct(framework, pod) runPodFromStruct(f, pod)
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
// wait for a delay == capped delay of MaxContainerBackOff // wait for a delay == capped delay of MaxContainerBackOff
@ -1039,9 +1040,9 @@ var _ = KubeDescribe("Pods", func() {
err error err error
) )
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) delay1, err = getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
if delay1 < kubelet.MaxContainerBackOff { if delay1 < kubelet.MaxContainerBackOff {
@ -1050,17 +1051,17 @@ var _ = KubeDescribe("Pods", func() {
} }
if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) { if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1) framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
} }
By("getting restart delay after a capped delay") By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(framework.Client, pod, framework.Namespace.Name, podName, containerName) delay2, err := getRestartDelay(f.Client, pod, f.Namespace.Name, podName, containerName)
if err != nil { if err != nil {
Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
} }
if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2) framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
} }
}) })
@ -1071,12 +1072,12 @@ var _ = KubeDescribe("Pods", func() {
// all providers), we can enable these tests. // all providers), we can enable these tests.
/* /*
It("should support remote command execution", func() { It("should support remote command execution", func() {
clientConfig, err := loadConfig() clientConfig, err := framework.LoadConfig()
if err != nil { if err != nil {
Failf("Failed to create client config: %v", err) framework.Failf("Failed to create client config: %v", err)
} }
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-exec-" + string(util.NewUUID()) name := "pod-exec-" + string(util.NewUUID())
@ -1102,7 +1103,7 @@ var _ = KubeDescribe("Pods", func() {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
_, err = podClient.Create(pod) _, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
defer func() { defer func() {
// We call defer here in case there is a problem with // We call defer here in case there is a problem with
@ -1112,45 +1113,45 @@ var _ = KubeDescribe("Pods", func() {
}() }()
By("waiting for the pod to start running") By("waiting for the pod to start running")
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
pod = &pods.Items[0] pod = &pods.Items[0]
By(fmt.Sprintf("executing command on host %s pod %s in container %s", By(fmt.Sprintf("executing command on host %s pod %s in container %s",
pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))
req := framework.Client.Get(). req := f.Client.Get().
Prefix("proxy"). Prefix("proxy").
Resource("nodes"). Resource("nodes").
Name(pod.Status.Host). Name(pod.Status.Host).
Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) Suffix("exec", f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
out := &bytes.Buffer{} out := &bytes.Buffer{}
e := remotecommand.New(req, clientConfig, []string{"whoami"}, nil, out, nil, false) e := remotecommand.New(req, clientConfig, []string{"whoami"}, nil, out, nil, false)
err = e.Execute() err = e.Execute()
if err != nil { if err != nil {
Failf("Failed to execute command on host %s pod %s in container %s: %v", framework.Failf("Failed to execute command on host %s pod %s in container %s: %v",
pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name, err) pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name, err)
} }
if e, a := "root\n", out.String(); e != a { if e, a := "root\n", out.String(); e != a {
Failf("exec: whoami: expected '%s', got '%s'", e, a) framework.Failf("exec: whoami: expected '%s', got '%s'", e, a)
} }
}) })
It("should support port forwarding", func() { It("should support port forwarding", func() {
clientConfig, err := loadConfig() clientConfig, err := framework.LoadConfig()
if err != nil { if err != nil {
Failf("Failed to create client config: %v", err) framework.Failf("Failed to create client config: %v", err)
} }
podClient := framework.Client.Pods(framework.Namespace.Name) podClient := f.Client.Pods(f.Namespace.Name)
By("creating the pod") By("creating the pod")
name := "pod-portforward-" + string(util.NewUUID()) name := "pod-portforward-" + string(util.NewUUID())
@ -1177,7 +1178,7 @@ var _ = KubeDescribe("Pods", func() {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
_, err = podClient.Create(pod) _, err = podClient.Create(pod)
if err != nil { if err != nil {
Failf("Failed to create pod: %v", err) framework.Failf("Failed to create pod: %v", err)
} }
defer func() { defer func() {
// We call defer here in case there is a problem with // We call defer here in case there is a problem with
@ -1187,14 +1188,14 @@ var _ = KubeDescribe("Pods", func() {
}() }()
By("waiting for the pod to start running") By("waiting for the pod to start running")
expectNoError(framework.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
Failf("Failed to query for pods: %v", err) framework.Failf("Failed to query for pods: %v", err)
} }
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -1202,16 +1203,16 @@ var _ = KubeDescribe("Pods", func() {
By(fmt.Sprintf("initiating port forwarding to host %s pod %s in container %s", By(fmt.Sprintf("initiating port forwarding to host %s pod %s in container %s",
pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))
req := framework.Client.Get(). req := f.Client.Get().
Prefix("proxy"). Prefix("proxy").
Resource("nodes"). Resource("nodes").
Name(pod.Status.Host). Name(pod.Status.Host).
Suffix("portForward", framework.Namespace.Name, pod.Name) Suffix("portForward", f.Namespace.Name, pod.Name)
stopChan := make(chan struct{}) stopChan := make(chan struct{})
pf, err := portforward.New(req, clientConfig, []string{"5678:80"}, stopChan) pf, err := portforward.New(req, clientConfig, []string{"5678:80"}, stopChan)
if err != nil { if err != nil {
Failf("Error creating port forwarder: %s", err) framework.Failf("Error creating port forwarder: %s", err)
} }
errorChan := make(chan error) errorChan := make(chan error)
@ -1224,11 +1225,11 @@ var _ = KubeDescribe("Pods", func() {
resp, err := http.Get("http://localhost:5678/") resp, err := http.Get("http://localhost:5678/")
if err != nil { if err != nil {
Failf("Error with http get to localhost:5678: %s", err) framework.Failf("Error with http get to localhost:5678: %s", err)
} }
body, err := ioutil.ReadAll(resp.Body) body, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
Failf("Error reading response body: %s", err) framework.Failf("Error reading response body: %s", err)
} }
titleRegex := regexp.MustCompile("<title>(.+)</title>") titleRegex := regexp.MustCompile("<title>(.+)</title>")
@ -1237,7 +1238,7 @@ var _ = KubeDescribe("Pods", func() {
Fail("Unable to locate page title in response HTML") Fail("Unable to locate page title in response HTML")
} }
if e, a := "Welcome to nginx on Debian!", matches[1]; e != a { if e, a := "Welcome to nginx on Debian!", matches[1]; e != a {
Failf("<title>: expected '%s', got '%s'", e, a) framework.Failf("<title>: expected '%s', got '%s'", e, a)
} }
}) })
*/ */
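
Besides the helper renames, the hunks above consistently rename the per-test framework object from "framework" to "f", so the local variable no longer shadows the newly imported package. A minimal Ginkgo skeleton in the post-refactoring style; the describe text and the listing step are illustrative, not part of the commit:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example", func() {
	// "f" is the per-test framework instance; "framework" now refers to the package.
	f := framework.NewDefaultFramework("example")

	It("should reach the API through the per-test namespace", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		framework.ExpectNoError(err, "listing pods in the test namespace")
		framework.Logf("found %d pods in namespace %q", len(pods.Items), f.Namespace.Name)
	})
})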

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
@ -90,7 +91,7 @@ type portForwardCommand struct {
func (c *portForwardCommand) Stop() { func (c *portForwardCommand) Stop() {
// SIGINT signals that kubectl port-forward should gracefully terminate // SIGINT signals that kubectl port-forward should gracefully terminate
if err := c.cmd.Process.Signal(syscall.SIGINT); err != nil { if err := c.cmd.Process.Signal(syscall.SIGINT); err != nil {
Logf("error sending SIGINT to kubectl port-forward: %v", err) framework.Logf("error sending SIGINT to kubectl port-forward: %v", err)
} }
// try to wait for a clean exit // try to wait for a clean exit
@ -108,41 +109,41 @@ func (c *portForwardCommand) Stop() {
// success // success
return return
} }
Logf("error waiting for kubectl port-forward to exit: %v", err) framework.Logf("error waiting for kubectl port-forward to exit: %v", err)
case <-expired.C: case <-expired.C:
Logf("timed out waiting for kubectl port-forward to exit") framework.Logf("timed out waiting for kubectl port-forward to exit")
} }
Logf("trying to forcibly kill kubectl port-forward") framework.Logf("trying to forcibly kill kubectl port-forward")
tryKill(c.cmd) framework.TryKill(c.cmd)
} }
func runPortForward(ns, podName string, port int) *portForwardCommand { func runPortForward(ns, podName string, port int) *portForwardCommand {
cmd := kubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port)) cmd := framework.KubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port))
// This is somewhat ugly but is the only way to retrieve the port that was picked // This is somewhat ugly but is the only way to retrieve the port that was picked
// by the port-forward command. We don't want to hard code the port as we have no // by the port-forward command. We don't want to hard code the port as we have no
// way of guaranteeing we can pick one that isn't in use, particularly on Jenkins. // way of guaranteeing we can pick one that isn't in use, particularly on Jenkins.
Logf("starting port-forward command and streaming output") framework.Logf("starting port-forward command and streaming output")
_, stderr, err := startCmdAndStreamOutput(cmd) _, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
Failf("Failed to start port-forward command: %v", err) framework.Failf("Failed to start port-forward command: %v", err)
} }
buf := make([]byte, 128) buf := make([]byte, 128)
var n int var n int
Logf("reading from `kubectl port-forward` command's stderr") framework.Logf("reading from `kubectl port-forward` command's stderr")
if n, err = stderr.Read(buf); err != nil { if n, err = stderr.Read(buf); err != nil {
Failf("Failed to read from kubectl port-forward stderr: %v", err) framework.Failf("Failed to read from kubectl port-forward stderr: %v", err)
} }
portForwardOutput := string(buf[:n]) portForwardOutput := string(buf[:n])
match := portForwardRegexp.FindStringSubmatch(portForwardOutput) match := portForwardRegexp.FindStringSubmatch(portForwardOutput)
if len(match) != 2 { if len(match) != 2 {
Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput)
} }
listenPort, err := strconv.Atoi(match[1]) listenPort, err := strconv.Atoi(match[1])
if err != nil { if err != nil {
Failf("Error converting %s to an int: %v", match[1], err) framework.Failf("Error converting %s to an int: %v", match[1], err)
} }
return &portForwardCommand{ return &portForwardCommand{
@ -151,42 +152,42 @@ func runPortForward(ns, podName string, port int) *portForwardCommand {
} }
} }
var _ = KubeDescribe("Port forwarding", func() { var _ = framework.KubeDescribe("Port forwarding", func() {
framework := NewDefaultFramework("port-forwarding") f := framework.NewDefaultFramework("port-forwarding")
KubeDescribe("With a server that expects a client request", func() { framework.KubeDescribe("With a server that expects a client request", func() {
It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("abc", "1", "1", "1") pod := pfPod("abc", "1", "1", "1")
if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil {
Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := framework.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
By("Running 'kubectl port-forward'") By("Running 'kubectl port-forward'")
cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) cmd := runPortForward(f.Namespace.Name, pod.Name, 80)
defer cmd.Stop() defer cmd.Stop()
By("Dialing the local port") By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
By("Closing the connection to the local port") By("Closing the connection to the local port")
conn.Close() conn.Close()
By("Waiting for the target pod to stop running") By("Waiting for the target pod to stop running")
if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil {
Failf("Pod did not stop running: %v", err) framework.Failf("Pod did not stop running: %v", err)
} }
By("Retrieving logs from the target pod") By("Retrieving logs from the target pod")
logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
Failf("Error retrieving logs: %v", err) framework.Failf("Error retrieving logs: %v", err)
} }
By("Verifying logs") By("Verifying logs")
@ -197,25 +198,25 @@ var _ = KubeDescribe("Port forwarding", func() {
It("should support a client that connects, sends data, and disconnects [Conformance]", func() { It("should support a client that connects, sends data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("abc", "10", "10", "100") pod := pfPod("abc", "10", "10", "100")
if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil {
Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := framework.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
By("Running 'kubectl port-forward'") By("Running 'kubectl port-forward'")
cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) cmd := runPortForward(f.Namespace.Name, pod.Name, 80)
defer cmd.Stop() defer cmd.Stop()
By("Dialing the local port") By("Dialing the local port")
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
Failf("Error resolving tcp addr: %v", err) framework.Failf("Error resolving tcp addr: %v", err)
} }
conn, err := net.DialTCP("tcp", nil, addr) conn, err := net.DialTCP("tcp", nil, addr)
if err != nil { if err != nil {
Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
By("Closing the connection to the local port") By("Closing the connection to the local port")
@ -231,22 +232,22 @@ var _ = KubeDescribe("Port forwarding", func() {
By("Reading data from the local port") By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
Failf("Unexpected error reading data from the server: %v", err) framework.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
Failf("Expected %q from server, got %q", e, a) framework.Failf("Expected %q from server, got %q", e, a)
} }
By("Waiting for the target pod to stop running") By("Waiting for the target pod to stop running")
if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil {
Failf("Pod did not stop running: %v", err) framework.Failf("Pod did not stop running: %v", err)
} }
By("Retrieving logs from the target pod") By("Retrieving logs from the target pod")
logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
Failf("Error retrieving logs: %v", err) framework.Failf("Error retrieving logs: %v", err)
} }
By("Verifying logs") By("Verifying logs")
@ -255,25 +256,25 @@ var _ = KubeDescribe("Port forwarding", func() {
verifyLogMessage(logOutput, "^Done$") verifyLogMessage(logOutput, "^Done$")
}) })
}) })
KubeDescribe("With a server that expects no client request", func() { framework.KubeDescribe("With a server that expects no client request", func() {
It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("", "10", "10", "100") pod := pfPod("", "10", "10", "100")
if _, err := framework.Client.Pods(framework.Namespace.Name).Create(pod); err != nil { if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil {
Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := framework.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
By("Running 'kubectl port-forward'") By("Running 'kubectl port-forward'")
cmd := runPortForward(framework.Namespace.Name, pod.Name, 80) cmd := runPortForward(f.Namespace.Name, pod.Name, 80)
defer cmd.Stop() defer cmd.Stop()
By("Dialing the local port") By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
By("Closing the connection to the local port") By("Closing the connection to the local port")
@ -283,22 +284,22 @@ var _ = KubeDescribe("Port forwarding", func() {
By("Reading data from the local port") By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
Failf("Unexpected error reading data from the server: %v", err) framework.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
Failf("Expected %q from server, got %q", e, a) framework.Failf("Expected %q from server, got %q", e, a)
} }
By("Waiting for the target pod to stop running") By("Waiting for the target pod to stop running")
if err := framework.WaitForPodNoLongerRunning(pod.Name); err != nil { if err := f.WaitForPodNoLongerRunning(pod.Name); err != nil {
Failf("Pod did not stop running: %v", err) framework.Failf("Pod did not stop running: %v", err)
} }
By("Retrieving logs from the target pod") By("Retrieving logs from the target pod")
logOutput, err := getPodLogs(framework.Client, framework.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
Failf("Error retrieving logs: %v", err) framework.Failf("Error retrieving logs: %v", err)
} }
By("Verifying logs") By("Verifying logs")
@ -316,5 +317,5 @@ func verifyLogMessage(log, expected string) {
return return
} }
} }
Failf("Missing %q from log: %s", expected, log) framework.Failf("Missing %q from log: %s", expected, log)
} }
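
runPortForward above recovers the locally bound port by scraping kubectl port-forward's stderr with a regexp. The sketch below shows that parsing step in isolation against a sample stderr line; the exact wording of the line and the pattern are assumptions for illustration, not the test's real portForwardRegexp.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// A hypothetical stderr line in the shape kubectl port-forward prints; the
// exact wording is an assumption made for this sketch.
const sampleStderr = "Forwarding from 127.0.0.1:43117 -> 80"

func main() {
	// Capture the locally bound port from the tool's output, mirroring how
	// runPortForward recovers the dynamically chosen port.
	re := regexp.MustCompile(`:(\d+) ->`)
	m := re.FindStringSubmatch(sampleStderr)
	if len(m) != 2 {
		fmt.Println("could not parse port-forward output")
		return
	}
	port, err := strconv.Atoi(m[1])
	if err != nil {
		fmt.Println("not a number:", m[1])
		return
	}
	fmt.Println("local port:", port)
}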

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
@ -51,7 +52,7 @@ func testPreStop(c *client.Client, ns string) {
} }
By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
_, err := c.Pods(ns).Create(podDescr) _, err := c.Pods(ns).Create(podDescr)
expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { defer func() {
@ -60,13 +61,13 @@ func testPreStop(c *client.Client, ns string) {
}() }()
By("Waiting for pods to come up.") By("Waiting for pods to come up.")
err = waitForPodRunningInNamespace(c, podDescr.Name, ns) err = framework.WaitForPodRunningInNamespace(c, podDescr.Name, ns)
expectNoError(err, "waiting for server pod to start") framework.ExpectNoError(err, "waiting for server pod to start")
val := "{\"Source\": \"prestop\"}" val := "{\"Source\": \"prestop\"}"
podOut, err := c.Pods(ns).Get(podDescr.Name) podOut, err := c.Pods(ns).Get(podDescr.Name)
expectNoError(err, "getting pod info") framework.ExpectNoError(err, "getting pod info")
preStopDescr := &api.Pod{ preStopDescr := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -94,7 +95,7 @@ func testPreStop(c *client.Client, ns string) {
By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
_, err = c.Pods(ns).Create(preStopDescr) _, err = c.Pods(ns).Create(preStopDescr)
expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
deletePreStop := true deletePreStop := true
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
@ -105,19 +106,19 @@ func testPreStop(c *client.Client, ns string) {
} }
}() }()
err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns) err = framework.WaitForPodRunningInNamespace(c, preStopDescr.Name, ns)
expectNoError(err, "waiting for tester pod to start") framework.ExpectNoError(err, "waiting for tester pod to start")
// Delete the pod with the preStop handler. // Delete the pod with the preStop handler.
By("Deleting pre-stop pod") By("Deleting pre-stop pod")
if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil { if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
deletePreStop = false deletePreStop = false
} }
expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))
// Validate that the server received the web poke. // Validate that the server received the web poke.
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
subResourceProxyAvailable, err := serverVersionGTE(subResourcePodProxyVersion, c) subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -142,11 +143,11 @@ func testPreStop(c *client.Client, ns string) {
if err != nil { if err != nil {
By(fmt.Sprintf("Error validating prestop: %v", err)) By(fmt.Sprintf("Error validating prestop: %v", err))
} else { } else {
Logf("Saw: %s", string(body)) framework.Logf("Saw: %s", string(body))
state := State{} state := State{}
err := json.Unmarshal(body, &state) err := json.Unmarshal(body, &state)
if err != nil { if err != nil {
Logf("Error parsing: %v", err) framework.Logf("Error parsing: %v", err)
return false, nil return false, nil
} }
if state.Received["prestop"] != 0 { if state.Received["prestop"] != 0 {
@ -155,11 +156,11 @@ func testPreStop(c *client.Client, ns string) {
} }
return false, nil return false, nil
}) })
expectNoError(err, "validating pre-stop.") framework.ExpectNoError(err, "validating pre-stop.")
} }
var _ = KubeDescribe("PreStop", func() { var _ = framework.KubeDescribe("PreStop", func() {
f := NewDefaultFramework("prestop") f := framework.NewDefaultFramework("prestop")
It("should call prestop when killing a pod [Conformance]", func() { It("should call prestop when killing a pod [Conformance]", func() {
testPreStop(f.Client, f.Namespace.Name) testPreStop(f.Client, f.Namespace.Name)
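
The pre-stop test above polls the tester pod's status endpoint and decodes the JSON counter map until it sees a "prestop" poke. Here is a standalone sketch of that poll-and-decode loop using only the standard library; pollUntil is a stand-in for wait.Poll, the response body is canned, and the intervals are shortened so the sketch runs instantly.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"
)

// state mirrors the shape the pre-stop test decodes: a counter map keyed by
// the poke source name. The field name is taken from the test above.
type state struct {
	Received map[string]int
}

// pollUntil is a small stand-in for wait.Poll: call check every interval
// until it reports done or the timeout expires.
func pollUntil(interval, timeout time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out")
}

func main() {
	// A canned body standing in for the tester pod's proxied status response.
	body := []byte(`{"Received":{"prestop":1}}`)

	err := pollUntil(5*time.Millisecond, 50*time.Millisecond, func() (bool, error) {
		var s state
		if err := json.Unmarshal(body, &s); err != nil {
			// A malformed body is retried rather than treated as fatal,
			// matching the test's behaviour.
			return false, nil
		}
		return s.Received["prestop"] != 0, nil
	})
	fmt.Println("validated pre-stop:", err == nil)
}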

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -43,17 +44,17 @@ const (
type PrivilegedPodTestConfig struct { type PrivilegedPodTestConfig struct {
privilegedPod *api.Pod privilegedPod *api.Pod
f *Framework f *framework.Framework
hostExecPod *api.Pod hostExecPod *api.Pod
} }
var _ = KubeDescribe("PrivilegedPod", func() { var _ = framework.KubeDescribe("PrivilegedPod", func() {
f := NewDefaultFramework("e2e-privilegedpod") f := framework.NewDefaultFramework("e2e-privilegedpod")
config := &PrivilegedPodTestConfig{ config := &PrivilegedPodTestConfig{
f: f, f: f,
} }
It("should test privileged pod", func() { It("should test privileged pod", func() {
config.hostExecPod = LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec") config.hostExecPod = framework.LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec")
By("Creating a privileged pod") By("Creating a privileged pod")
config.createPrivilegedPod() config.createPrivilegedPod()
@ -69,14 +70,14 @@ var _ = KubeDescribe("PrivilegedPod", func() {
func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnPrivilegedContainer() { func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnPrivilegedContainer() {
outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, privilegedHttpPort) outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, privilegedHttpPort)
if len(outputMap["error"]) > 0 { if len(outputMap["error"]) > 0 {
Failf("Privileged command failed unexpectedly on privileged container, output:%v", outputMap) framework.Failf("Privileged command failed unexpectedly on privileged container, output:%v", outputMap)
} }
} }
func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnNonPrivilegedContainer() { func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnNonPrivilegedContainer() {
outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, notPrivilegedHttpPort) outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, notPrivilegedHttpPort)
if len(outputMap["error"]) == 0 { if len(outputMap["error"]) == 0 {
Failf("Privileged command should have failed on non-privileged container, output:%v", outputMap) framework.Failf("Privileged command should have failed on non-privileged container, output:%v", outputMap)
} }
} }
@ -89,11 +90,11 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
v.Encode()) v.Encode())
By(fmt.Sprintf("Exec-ing into container over http. Running command:%s", cmd)) By(fmt.Sprintf("Exec-ing into container over http. Running command:%s", cmd))
stdout := RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd) stdout := framework.RunHostCmdOrDie(config.hostExecPod.Namespace, config.hostExecPod.Name, cmd)
var output map[string]string var output map[string]string
err := json.Unmarshal([]byte(stdout), &output) err := json.Unmarshal([]byte(stdout), &output)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
Logf("Deserialized output is %v", stdout) framework.Logf("Deserialized output is %v", stdout)
return output return output
} }
@ -147,12 +148,12 @@ func (config *PrivilegedPodTestConfig) createPrivilegedPod() {
func (config *PrivilegedPodTestConfig) createPod(pod *api.Pod) *api.Pod { func (config *PrivilegedPodTestConfig) createPod(pod *api.Pod) *api.Pod {
createdPod, err := config.getPodClient().Create(pod) createdPod, err := config.getPodClient().Create(pod)
if err != nil { if err != nil {
Failf("Failed to create %q pod: %v", pod.Name, err) framework.Failf("Failed to create %q pod: %v", pod.Name, err)
} }
expectNoError(config.f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(config.f.WaitForPodRunning(pod.Name))
createdPod, err = config.getPodClient().Get(pod.Name) createdPod, err = config.getPodClient().Get(pod.Name)
if err != nil { if err != nil {
Failf("Failed to retrieve %q pod: %v", pod.Name, err) framework.Failf("Failed to retrieve %q pod: %v", pod.Name, err)
} }
return createdPod return createdPod
} }
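
dialFromContainer above builds a query string with url.Values, curls the container, and decodes the JSON reply into a map whose "error" key decides pass or fail. A minimal sketch of that decode-and-check step follows; the query parameter name and the canned reply are placeholders, not the real test fixture.

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Build the query string the way dialFromContainer does; the parameter
	// name, host, and port here are placeholders.
	v := url.Values{}
	v.Set("shellCommand", "ls /root")
	fmt.Println("would curl: http://<pod-ip>:<port>/shell?" + v.Encode())

	// A canned curl response standing in for the container's JSON reply.
	stdout := `{"output":"...","error":""}`

	var output map[string]string
	if err := json.Unmarshal([]byte(stdout), &output); err != nil {
		fmt.Println("could not unmarshal curl response:", err)
		return
	}
	// The test treats a non-empty "error" value as a failed privileged command.
	if len(output["error"]) > 0 {
		fmt.Println("privileged command failed:", output["error"])
		return
	}
	fmt.Println("privileged command succeeded")
}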

View File

@ -29,12 +29,13 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("Proxy", func() { var _ = framework.KubeDescribe("Proxy", func() {
version := testapi.Default.GroupVersion().Version version := testapi.Default.GroupVersion().Version
Context("version "+version, func() { proxyContext(version) }) Context("version "+version, func() { proxyContext(version) })
}) })
@ -51,7 +52,7 @@ const (
) )
func proxyContext(version string) { func proxyContext(version string) {
f := NewDefaultFramework("proxy") f := framework.NewDefaultFramework("proxy")
prefix := "/api/" + version prefix := "/api/" + version
// Port here has to be kept in sync with default kubelet port. // Port here has to be kept in sync with default kubelet port.
@ -99,13 +100,13 @@ func proxyContext(version string) {
defer func(name string) { defer func(name string) {
err := f.Client.Services(f.Namespace.Name).Delete(name) err := f.Client.Services(f.Namespace.Name).Delete(name)
if err != nil { if err != nil {
Logf("Failed deleting service %v: %v", name, err) framework.Logf("Failed deleting service %v: %v", name, err)
} }
}(service.Name) }(service.Name)
// Make an RC with a single pod. // Make an RC with a single pod.
pods := []*api.Pod{} pods := []*api.Pod{}
cfg := RCConfig{ cfg := framework.RCConfig{
Client: f.Client, Client: f.Client,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
Name: service.Name, Name: service.Name,
@ -141,8 +142,8 @@ func proxyContext(version string) {
Labels: labels, Labels: labels,
CreatedPods: &pods, CreatedPods: &pods,
} }
Expect(RunRC(cfg)).NotTo(HaveOccurred()) Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
defer DeleteRC(f.Client, f.Namespace.Name, cfg.Name) defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name)
Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred()) Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
@ -247,7 +248,7 @@ func proxyContext(version string) {
}) })
} }
func doProxy(f *Framework, path string) (body []byte, statusCode int, d time.Duration, err error) { func doProxy(f *framework.Framework, path string) (body []byte, statusCode int, d time.Duration, err error) {
// About all of the proxy accesses in this file: // About all of the proxy accesses in this file:
// * AbsPath is used because it preserves the trailing '/'. // * AbsPath is used because it preserves the trailing '/'.
// * Do().Raw() is used (instead of DoRaw()) because it will turn an // * Do().Raw() is used (instead of DoRaw()) because it will turn an
@ -258,9 +259,9 @@ func doProxy(f *Framework, path string) (body []byte, statusCode int, d time.Dur
body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
d = time.Since(start) d = time.Since(start)
if len(body) > 0 { if len(body) > 0 {
Logf("%v: %s (%v; %v)", path, truncate(body, maxDisplayBodyLen), statusCode, d) framework.Logf("%v: %s (%v; %v)", path, truncate(body, maxDisplayBodyLen), statusCode, d)
} else { } else {
Logf("%v: %s (%v; %v)", path, "no body", statusCode, d) framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
} }
return return
} }
@ -276,14 +277,14 @@ func truncate(b []byte, maxLen int) []byte {
func pickNode(c *client.Client) (string, error) { func pickNode(c *client.Client) (string, error) {
// TODO: investigate why it doesn't work on master Node. // TODO: investigate why it doesn't work on master Node.
nodes := ListSchedulableNodesOrDie(c) nodes := framework.ListSchedulableNodesOrDie(c)
if len(nodes.Items) == 0 { if len(nodes.Items) == 0 {
return "", fmt.Errorf("no nodes exist, can't test node proxy") return "", fmt.Errorf("no nodes exist, can't test node proxy")
} }
return nodes.Items[0].Name, nil return nodes.Items[0].Name, nil
} }
func nodeProxyTest(f *Framework, prefix, nodeDest string) { func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
node, err := pickNode(f.Client) node, err := pickNode(f.Client)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO: Change it to test whether all requests succeeded when requests // TODO: Change it to test whether all requests succeeded when requests
@ -292,7 +293,7 @@ func nodeProxyTest(f *Framework, prefix, nodeDest string) {
for i := 0; i < proxyAttempts; i++ { for i := 0; i < proxyAttempts; i++ {
_, status, d, err := doProxy(f, prefix+node+nodeDest) _, status, d, err := doProxy(f, prefix+node+nodeDest)
if status == http.StatusServiceUnavailable { if status == http.StatusServiceUnavailable {
Logf("Failed proxying node logs due to service unavailable: %v", err) framework.Logf("Failed proxying node logs due to service unavailable: %v", err)
time.Sleep(time.Second) time.Sleep(time.Second)
serviceUnavailableErrors++ serviceUnavailableErrors++
} else { } else {
@ -302,7 +303,7 @@ func nodeProxyTest(f *Framework, prefix, nodeDest string) {
} }
} }
if serviceUnavailableErrors > 0 { if serviceUnavailableErrors > 0 {
Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors) framework.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
} }
maxFailures := int(math.Floor(0.1 * float64(proxyAttempts))) maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures)) Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures))
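
doProxy logs a truncated body plus status and latency, and nodeProxyTest tolerates fewer than 10% service-unavailable responses. The sketch below illustrates both the truncation helper and the failure-budget arithmetic; the truncation suffix and the sample numbers are assumptions for illustration.

package main

import (
	"fmt"
	"math"
)

// truncate shortens a response body for logging, in the spirit of the helper
// used by doProxy above (the exact suffix is an assumption).
func truncate(b []byte, maxLen int) []byte {
	if len(b) <= maxLen {
		return b
	}
	out := append([]byte{}, b[:maxLen]...)
	return append(out, "..."...)
}

func main() {
	body := []byte("<long proxied response body that we do not want to dump in full>")
	fmt.Printf("%s\n", truncate(body, 16))

	// nodeProxyTest tolerates a small budget of 503s: fewer than 10% of the
	// attempts may fail with service-unavailable.
	proxyAttempts := 20
	serviceUnavailableErrors := 1
	maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
	fmt.Printf("failures=%d budget=%d within budget=%v\n",
		serviceUnavailableErrors, maxFailures, serviceUnavailableErrors < maxFailures)
}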

View File

@ -24,30 +24,31 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("ReplicationController", func() { var _ = framework.KubeDescribe("ReplicationController", func() {
framework := NewDefaultFramework("replication-controller") f := framework.NewDefaultFramework("replication-controller")
It("should serve a basic image on each replica with a public image [Conformance]", func() { It("should serve a basic image on each replica with a public image [Conformance]", func() {
ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
}) })
It("should serve a basic image on each replica with a private image", func() { It("should serve a basic image on each replica with a private image", func() {
// requires private images // requires private images
SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
ServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") ServeImageOrFail(f, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4")
}) })
}) })
// A basic test to check the deployment of an image using // A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname // a replication controller. The image serves its hostname
// which is checked for each replica. // which is checked for each replica.
func ServeImageOrFail(f *Framework, test string, image string) { func ServeImageOrFail(f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(util.NewUUID()) name := "my-hostname-" + test + "-" + string(util.NewUUID())
replicas := 2 replicas := 2
@ -85,15 +86,15 @@ func ServeImageOrFail(f *Framework, test string, image string) {
// Cleanup the replication controller when we are done. // Cleanup the replication controller when we are done.
defer func() { defer func() {
// Resize the replication controller to zero to get rid of pods. // Resize the replication controller to zero to get rid of pods.
if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
} }
}() }()
// List the pods, making sure we observe all the replicas. // List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas) pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas)
By("Ensuring each pod is running") By("Ensuring each pod is running")
@ -111,8 +112,8 @@ func ServeImageOrFail(f *Framework, test string, image string) {
By("Trying to dial each unique pod") By("Trying to dial each unique pod")
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
err = wait.Poll(retryInterval, retryTimeout, podProxyResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses) err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
} }
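
ServeImageOrFail waits until every replica answers through the proxy with its own hostname. Below is a toy, cluster-free version of that "each pod responds uniquely" check; the stubbed get function stands in for the proxied HTTP calls made by the real response checker.

package main

import (
	"errors"
	"fmt"
)

// checkAllRespond is a toy version of the response checker polled above: each
// replica must answer with a distinct, non-empty hostname.
func checkAllRespond(pods []string, get func(pod string) (string, error)) error {
	seen := map[string]bool{}
	for _, pod := range pods {
		host, err := get(pod)
		if err != nil {
			return err
		}
		if host == "" || seen[host] {
			return errors.New("duplicate or empty hostname from " + pod)
		}
		seen[host] = true
	}
	return nil
}

func main() {
	// Stubbed responses stand in for proxied GETs against each pod.
	responses := map[string]string{"pod-a": "pod-a", "pod-b": "pod-b"}
	get := func(pod string) (string, error) { return responses[pod], nil }

	err := checkAllRespond([]string{"pod-a", "pod-b"}, get)
	fmt.Println("all replicas served their hostname:", err == nil)
}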

View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -45,15 +46,15 @@ const (
rebootPodReadyAgainTimeout = 5 * time.Minute rebootPodReadyAgainTimeout = 5 * time.Minute
) )
var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
var f *Framework var f *framework.Framework
BeforeEach(func() { BeforeEach(func() {
// These tests requires SSH to nodes, so the provider check should be identical to there // These tests requires SSH to nodes, so the provider check should be identical to there
// (the limiting factor is the implementation of util.go's getSigner(...)). // (the limiting factor is the implementation of framework/util.go's GetSigner(...)).
// Cluster must support node reboot // Cluster must support node reboot
SkipUnlessProviderIs(providersWithSSH...) framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
}) })
AfterEach(func() { AfterEach(func() {
@ -66,23 +67,23 @@ var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items { for _, e := range events.Items {
Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
} }
} }
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test // make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that // that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
// was recently rebooted. There's no good way to poll for proxies being closed, so we sleep. // was recently rebooted. There's no good way to poll for proxies being closed, so we sleep.
// //
// TODO(cjcullen) reduce this sleep (#19314) // TODO(cjcullen) reduce this sleep (#19314)
if providerIs("gke") { if framework.ProviderIs("gke") {
By("waiting 5 minutes for all dead tunnels to be dropped") By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute) time.Sleep(5 * time.Minute)
} }
}) })
f = NewDefaultFramework("reboot") f = framework.NewDefaultFramework("reboot")
It("each node by ordering clean reboot and ensure they function upon restart", func() { It("each node by ordering clean reboot and ensure they function upon restart", func() {
// clean shutdown and restart // clean shutdown and restart
@ -127,7 +128,7 @@ var _ = KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
func testReboot(c *client.Client, rebootCmd string) { func testReboot(c *client.Client, rebootCmd string) {
// Get all nodes, and kick off the test on each. // Get all nodes, and kick off the test on each.
nodelist := ListSchedulableNodesOrDie(c) nodelist := framework.ListSchedulableNodesOrDie(c)
result := make([]bool, len(nodelist.Items)) result := make([]bool, len(nodelist.Items))
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
wg.Add(len(nodelist.Items)) wg.Add(len(nodelist.Items))
@ -137,7 +138,7 @@ func testReboot(c *client.Client, rebootCmd string) {
go func(ix int) { go func(ix int) {
defer wg.Done() defer wg.Done()
n := nodelist.Items[ix] n := nodelist.Items[ix]
result[ix] = rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd) result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
if !result[ix] { if !result[ix] {
failed = true failed = true
} }
@ -151,10 +152,10 @@ func testReboot(c *client.Client, rebootCmd string) {
for ix := range nodelist.Items { for ix := range nodelist.Items {
n := nodelist.Items[ix] n := nodelist.Items[ix]
if !result[ix] { if !result[ix] {
Logf("Node %s failed reboot test.", n.ObjectMeta.Name) framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
} }
} }
Failf("Test failed; at least one node failed to reboot in the time given.") framework.Failf("Test failed; at least one node failed to reboot in the time given.")
} }
} }
@ -165,9 +166,9 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s
prefix = "Retrieving log for the last terminated container" prefix = "Retrieving log for the last terminated container"
} }
if err != nil { if err != nil {
Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log) framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
} else { } else {
Logf("%s %s:\n%s\n", prefix, id, log) framework.Logf("%s %s:\n%s\n", prefix, id, log)
} }
} }
podNameSet := sets.NewString(podNames...) podNameSet := sets.NewString(podNames...)
@ -178,14 +179,14 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s
if !podNameSet.Has(p.Name) { if !podNameSet.Has(p.Name) {
continue continue
} }
if ok, _ := podRunningReady(p); ok { if ok, _ := framework.PodRunningReady(p); ok {
continue continue
} }
Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status) framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
// Print the log of the containers if pod is not running and ready. // Print the log of the containers if pod is not running and ready.
for _, container := range p.Status.ContainerStatuses { for _, container := range p.Status.ContainerStatuses {
cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name) cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
log, err := getPodLogs(c, p.Namespace, p.Name, container.Name) log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifer, log, err, false) printFn(cIdentifer, log, err, false)
// Get log from the previous container. // Get log from the previous container.
if container.RestartCount > 0 { if container.RestartCount > 0 {
@ -208,19 +209,19 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s
func rebootNode(c *client.Client, provider, name, rebootCmd string) bool { func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
// Setup // Setup
ns := api.NamespaceSystem ns := api.NamespaceSystem
ps := newPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name)) ps := framework.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
defer ps.Stop() defer ps.Stop()
// Get the node initially. // Get the node initially.
Logf("Getting %s", name) framework.Logf("Getting %s", name)
node, err := c.Nodes().Get(name) node, err := c.Nodes().Get(name)
if err != nil { if err != nil {
Logf("Couldn't get node %s", name) framework.Logf("Couldn't get node %s", name)
return false return false
} }
// Node sanity check: ensure it is "ready". // Node sanity check: ensure it is "ready".
if !waitForNodeToBeReady(c, name, nodeReadyInitialTimeout) { if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
return false return false
} }
@ -240,39 +241,39 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
podNames = append(podNames, p.ObjectMeta.Name) podNames = append(podNames, p.ObjectMeta.Name)
} }
} }
Logf("Node %s has %d pods: %v", name, len(podNames), podNames) framework.Logf("Node %s has %d pods: %v", name, len(podNames), podNames)
// For each pod, we do a sanity check to ensure it's running / healthy // For each pod, we do a sanity check to ensure it's running / healthy
// now, as that's what we'll be checking later. // now, as that's what we'll be checking later.
if !checkPodsRunningReady(c, ns, podNames, podReadyBeforeTimeout) { if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(c, ns, podNames, pods) printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
return false return false
} }
// Reboot the node. // Reboot the node.
if err = issueSSHCommand(rebootCmd, provider, node); err != nil { if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
Logf("Error while issuing ssh command: %v", err) framework.Logf("Error while issuing ssh command: %v", err)
return false return false
} }
// Wait for some kind of "not ready" status. // Wait for some kind of "not ready" status.
if !waitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) { if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
return false return false
} }
// Wait for some kind of "ready" status. // Wait for some kind of "ready" status.
if !waitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) { if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
return false return false
} }
// Ensure all of the pods that we found on this node before the reboot are // Ensure all of the pods that we found on this node before the reboot are
// running / healthy. // running / healthy.
if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) { if !framework.CheckPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List() newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods) printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
return false return false
} }
Logf("Reboot successful on node %s", name) framework.Logf("Reboot successful on node %s", name)
return true return true
} }
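
printStatusAndLogsForNotReadyPods filters the node's current pods down to the names captured before the reboot using a string set. A small stdlib sketch of that set-based filtering follows; the pod names are invented.

package main

import "fmt"

// newStringSet is a tiny stand-in for sets.NewString used above: membership
// testing for the pod names captured before the reboot.
func newStringSet(items ...string) map[string]struct{} {
	s := make(map[string]struct{}, len(items))
	for _, it := range items {
		s[it] = struct{}{}
	}
	return s
}

func main() {
	before := []string{"kube-proxy-abc", "fluentd-xyz"}
	current := []string{"kube-proxy-abc", "fluentd-xyz", "unrelated-pod"}

	tracked := newStringSet(before...)
	for _, name := range current {
		if _, ok := tracked[name]; !ok {
			continue // skip pods we were not watching before the reboot
		}
		fmt.Println("would check readiness and fetch logs for:", name)
	}
}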

View File

@ -26,29 +26,30 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = KubeDescribe("ReplicaSet", func() { var _ = framework.KubeDescribe("ReplicaSet", func() {
framework := NewDefaultFramework("replicaset") f := framework.NewDefaultFramework("replicaset")
It("should serve a basic image on each replica with a public image [Conformance]", func() { It("should serve a basic image on each replica with a public image [Conformance]", func() {
ReplicaSetServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:v1.4") ReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
}) })
It("should serve a basic image on each replica with a private image", func() { It("should serve a basic image on each replica with a private image", func() {
// requires private images // requires private images
SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
ReplicaSetServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4") ReplicaSetServeImageOrFail(f, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:v1.4")
}) })
}) })
// A basic test to check the deployment of an image using a ReplicaSet. The // A basic test to check the deployment of an image using a ReplicaSet. The
// image serves its hostname which is checked for each replica. // image serves its hostname which is checked for each replica.
func ReplicaSetServeImageOrFail(f *Framework, test string, image string) { func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(util.NewUUID()) name := "my-hostname-" + test + "-" + string(util.NewUUID())
replicas := 2 replicas := 2
@ -85,15 +86,15 @@ func ReplicaSetServeImageOrFail(f *Framework, test string, image string) {
// Cleanup the ReplicaSet when we are done. // Cleanup the ReplicaSet when we are done.
defer func() { defer func() {
// Resize the ReplicaSet to zero to get rid of pods. // Resize the ReplicaSet to zero to get rid of pods.
if err := DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil { if err := framework.DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil {
Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err) framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
} }
}() }()
// List the pods, making sure we observe all the replicas. // List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas) pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring each pod is running") By("Ensuring each pod is running")
@ -112,8 +113,8 @@ func ReplicaSetServeImageOrFail(f *Framework, test string, image string) {
By("Trying to dial each unique pod") By("Trying to dial each unique pod")
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
err = wait.Poll(retryInterval, retryTimeout, podProxyResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses) err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
} }
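
The ReplicaSet test lists its replicas with a label selector built from a single name label. The sketch below hand-rolls the map-to-selector-string rendering for illustration only; the real labels.SelectorFromSet returns a labels.Selector, not a string.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorFromSet renders a label map as the "k=v,k=v" selector form used to
// list the replicas created by the test. Illustration only.
func selectorFromSet(set map[string]string) string {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+set[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	name := "my-hostname-basic-1234"
	fmt.Println(selectorFromSet(map[string]string{"name": name}))
}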

View File

@ -29,6 +29,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/autoscaling"
@ -53,48 +54,48 @@ const (
) )
func resizeGroup(size int) error { func resizeGroup(size int) error {
if testContext.ReportDir != "" { if framework.TestContext.ReportDir != "" {
CoreDump(testContext.ReportDir) framework.CoreDump(framework.TestContext.ReportDir)
defer CoreDump(testContext.ReportDir) defer framework.CoreDump(framework.TestContext.ReportDir)
} }
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
// TODO: make this hit the compute API directly instead of shelling out to gcloud. // TODO: make this hit the compute API directly instead of shelling out to gcloud.
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize", output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--size=%v", size), framework.TestContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--size=%v", size),
"--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone).CombinedOutput() "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil { if err != nil {
Logf("Failed to resize node instance group: %v", string(output)) framework.Logf("Failed to resize node instance group: %v", string(output))
} }
return err return err
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
client := autoscaling.New(session.New()) client := autoscaling.New(session.New())
return awscloud.ResizeInstanceGroup(client, testContext.CloudConfig.NodeInstanceGroup, size) return awscloud.ResizeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup, size)
} else { } else {
return fmt.Errorf("Provider does not support InstanceGroups") return fmt.Errorf("Provider does not support InstanceGroups")
} }
} }
func groupSize() (int, error) { func groupSize() (int, error) {
if testContext.Provider == "gce" || testContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
// TODO: make this hit the compute API directly instead of shelling out to gcloud. // TODO: make this hit the compute API directly instead of shelling out to gcloud.
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic // TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
"list-instances", testContext.CloudConfig.NodeInstanceGroup, "--project="+testContext.CloudConfig.ProjectID, "list-instances", framework.TestContext.CloudConfig.NodeInstanceGroup, "--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+testContext.CloudConfig.Zone).CombinedOutput() "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil { if err != nil {
return -1, err return -1, err
} }
re := regexp.MustCompile("RUNNING") re := regexp.MustCompile("RUNNING")
return len(re.FindAllString(string(output), -1)), nil return len(re.FindAllString(string(output), -1)), nil
} else if testContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
client := autoscaling.New(session.New()) client := autoscaling.New(session.New())
instanceGroup, err := awscloud.DescribeInstanceGroup(client, testContext.CloudConfig.NodeInstanceGroup) instanceGroup, err := awscloud.DescribeInstanceGroup(client, framework.TestContext.CloudConfig.NodeInstanceGroup)
if err != nil { if err != nil {
return -1, fmt.Errorf("error describing instance group: %v", err) return -1, fmt.Errorf("error describing instance group: %v", err)
} }
if instanceGroup == nil { if instanceGroup == nil {
return -1, fmt.Errorf("instance group not found: %s", testContext.CloudConfig.NodeInstanceGroup) return -1, fmt.Errorf("instance group not found: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} }
return instanceGroup.CurrentSize() return instanceGroup.CurrentSize()
} else { } else {
@ -107,14 +108,14 @@ func waitForGroupSize(size int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
currentSize, err := groupSize() currentSize, err := groupSize()
if err != nil { if err != nil {
Logf("Failed to get node instance group size: %v", err) framework.Logf("Failed to get node instance group size: %v", err)
continue continue
} }
if currentSize != size { if currentSize != size {
Logf("Waiting for node instance group size %d, current size %d", size, currentSize) framework.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
continue continue
} }
Logf("Node instance group has reached the desired size %d", size) framework.Logf("Node instance group has reached the desired size %d", size)
return nil return nil
} }
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size) return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
@ -168,9 +169,9 @@ func podOnNode(podName, nodeName string, image string) *api.Pod {
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error { func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage)) pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
if err == nil { if err == nil {
Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else { } else {
Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
} }
return err return err
} }
@ -239,23 +240,23 @@ func resizeRC(c *client.Client, ns, name string, replicas int) error {
func getMaster(c *client.Client) string { func getMaster(c *client.Client) string {
master := "" master := ""
switch testContext.Provider { switch framework.TestContext.Provider {
case "gce": case "gce":
eps, err := c.Endpoints(api.NamespaceDefault).Get("kubernetes") eps, err := c.Endpoints(api.NamespaceDefault).Get("kubernetes")
if err != nil { if err != nil {
Failf("Fail to get kubernetes endpoinds: %v", err) framework.Failf("Fail to get kubernetes endpoinds: %v", err)
} }
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 { if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps) framework.Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
} }
master = eps.Subsets[0].Addresses[0].IP master = eps.Subsets[0].Addresses[0].IP
case "gke": case "gke":
master = strings.TrimPrefix(testContext.Host, "https://") master = strings.TrimPrefix(framework.TestContext.Host, "https://")
case "aws": case "aws":
// TODO(justinsb): Avoid hardcoding this. // TODO(justinsb): Avoid hardcoding this.
master = "172.20.0.9" master = "172.20.0.9"
default: default:
Failf("This test is not supported for provider %s and should be disabled", testContext.Provider) framework.Failf("This test is not supported for provider %s and should be disabled", framework.TestContext.Provider)
} }
return master return master
} }
@ -263,7 +264,7 @@ func getMaster(c *client.Client) string {
// Return node external IP concatenated with port 22 for ssh // Return node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22 // e.g. 1.2.3.4:22
func getNodeExternalIP(node *api.Node) string { func getNodeExternalIP(node *api.Node) string {
Logf("Getting external IP address for %s", node.Name) framework.Logf("Getting external IP address for %s", node.Name)
host := "" host := ""
for _, a := range node.Status.Addresses { for _, a := range node.Status.Addresses {
if a.Type == api.NodeExternalIP { if a.Type == api.NodeExternalIP {
@ -272,7 +273,7 @@ func getNodeExternalIP(node *api.Node) string {
} }
} }
if host == "" { if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) framework.Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
} }
return host return host
} }
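
getNodeExternalIP walks the node's addresses, picks the external IP, and appends the SSH port. The sketch below mirrors that selection with a stand-in address type; the field names only approximate api.NodeAddress.

package main

import (
	"fmt"
	"net"
)

// nodeAddress mirrors the shape of api.NodeAddress just enough for this
// sketch: a type tag plus the address itself.
type nodeAddress struct {
	Type    string
	Address string
}

// externalSSHAddr picks the first ExternalIP and appends the SSH port, in the
// spirit of getNodeExternalIP above.
func externalSSHAddr(addrs []nodeAddress) (string, bool) {
	for _, a := range addrs {
		if a.Type == "ExternalIP" {
			return net.JoinHostPort(a.Address, "22"), true
		}
	}
	return "", false
}

func main() {
	addrs := []nodeAddress{
		{Type: "InternalIP", Address: "10.0.0.5"},
		{Type: "ExternalIP", Address: "1.2.3.4"},
	}
	if host, ok := externalSSHAddr(addrs); ok {
		fmt.Println("ssh endpoint:", host)
	} else {
		fmt.Println("node has no external IP")
	}
}
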
@ -294,26 +295,26 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
// had been inserted. (yes, we could look at the error code and ssh error // had been inserted. (yes, we could look at the error code and ssh error
// separately, but I prefer to stay on the safe side). // separately, but I prefer to stay on the safe side).
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name)) By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
unblockNetwork(host, master) framework.UnblockNetwork(host, master)
}() }()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) { if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
blockNetwork(host, master) framework.BlockNetwork(host, master)
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) { if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
} }
Logf("Waiting for pod %s to be removed", podNameToDisappear) framework.Logf("Waiting for pod %s to be removed", podNameToDisappear)
err := waitForRCPodToDisappear(c, ns, rcName, podNameToDisappear) err := framework.WaitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("verifying whether the pod from the unreachable node is recreated") By("verifying whether the pod from the unreachable node is recreated")
err = verifyPods(c, ns, rcName, true, replicas) err = framework.VerifyPods(c, ns, rcName, true, replicas)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// network traffic is unblocked in a deferred function // network traffic is unblocked in a deferred function
@ -326,41 +327,41 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
for !expected && !timeout { for !expected && !timeout {
select { select {
case n := <-newNode: case n := <-newNode:
if isNodeConditionSetAsExpected(n, api.NodeReady, isReady) { if framework.IsNodeConditionSetAsExpected(n, api.NodeReady, isReady) {
expected = true expected = true
} else { } else {
Logf("Observed node ready status is NOT %v as expected", isReady) framework.Logf("Observed node ready status is NOT %v as expected", isReady)
} }
case <-timer: case <-timer:
timeout = true timeout = true
} }
} }
if !expected { if !expected {
Failf("Failed to observe node ready status change to %v", isReady) framework.Failf("Failed to observe node ready status change to %v", isReady)
} }
} }
var _ = KubeDescribe("Nodes [Disruptive]", func() { var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
framework := NewDefaultFramework("resize-nodes") f := framework.NewDefaultFramework("resize-nodes")
var systemPodsNo int var systemPodsNo int
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{}) systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
systemPodsNo = len(systemPods.Items) systemPodsNo = len(systemPods.Items)
}) })
// Slow issue #13323 (8 min) // Slow issue #13323 (8 min)
KubeDescribe("Resize [Slow]", func() { framework.KubeDescribe("Resize [Slow]", func() {
var skipped bool var skipped bool
BeforeEach(func() { BeforeEach(func() {
skipped = true skipped = true
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
skipped = false skipped = false
}) })
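
The hunks above capture the core shape of the refactoring: the test builds f := framework.NewDefaultFramework(...), reads f.Client and f.Namespace.Name in BeforeEach, and reaches the skip helpers through the framework package. A minimal sketch of that shape, using only identifiers visible in this diff (the describe and namespace names are placeholders), might look like:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Example [Disruptive]", func() {
	// NewDefaultFramework creates (and later tears down) a uniquely named namespace.
	f := framework.NewDefaultFramework("example")

	var c *client.Client
	var ns string

	BeforeEach(func() {
		// Pull the per-test client and namespace off the framework object.
		c = f.Client
		ns = f.Namespace.Name

		// Skip helpers now live in the framework package.
		framework.SkipUnlessProviderIs("gce", "gke", "aws")
		framework.SkipUnlessNodeCountIsAtLeast(2)

		systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("namespace %s; %d system pods at start", ns, len(systemPods.Items))
	})
})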
@ -370,32 +371,32 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
} }
By("restoring the original node instance group size") By("restoring the original node instance group size")
if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil { if err := resizeGroup(framework.TestContext.CloudConfig.NumNodes); err != nil {
Failf("Couldn't restore the original node instance group size: %v", err) framework.Failf("Couldn't restore the original node instance group size: %v", err)
} }
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs // Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a // right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to poll for proxies // closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep. // being closed, so we sleep.
// //
// TODO(cjcullen) reduce this sleep (#19314) // TODO(cjcullen) reduce this sleep (#19314)
if providerIs("gke") { if framework.ProviderIs("gke") {
By("waiting 5 minutes for all dead tunnels to be dropped") By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute) time.Sleep(5 * time.Minute)
} }
if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil { if err := waitForGroupSize(framework.TestContext.CloudConfig.NumNodes); err != nil {
Failf("Couldn't restore the original node instance group size: %v", err) framework.Failf("Couldn't restore the original node instance group size: %v", err)
} }
if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { if err := framework.WaitForClusterSize(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
Failf("Couldn't restore the original cluster size: %v", err) framework.Failf("Couldn't restore the original cluster size: %v", err)
} }
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health. // the cluster is restored to health.
By("waiting for system pods to successfully restart") By("waiting for system pods to successfully restart")
err := waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout) err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -403,9 +404,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
// Create a replication controller for a service that serves its hostname. // Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node" name := "my-hostname-delete-node"
replicas := testContext.CloudConfig.NumNodes replicas := framework.TestContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas) newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas) err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("decreasing cluster size to %d", replicas-1)) By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
@ -413,11 +414,11 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForGroupSize(replicas - 1) err = waitForGroupSize(replicas - 1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForClusterSize(c, replicas-1, 10*time.Minute) err = framework.WaitForClusterSize(c, replicas-1, 10*time.Minute)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("verifying whether the pods from the removed node are recreated") By("verifying whether the pods from the removed node are recreated")
err = verifyPods(c, ns, name, true, replicas) err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -427,9 +428,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node" name := "my-hostname-add-node"
newSVCByName(c, ns, name) newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes replicas := framework.TestContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas) newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas) err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("increasing cluster size to %d", replicas+1)) By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
@ -437,22 +438,22 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForGroupSize(replicas + 1) err = waitForGroupSize(replicas + 1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForClusterSize(c, replicas+1, 10*time.Minute) err = framework.WaitForClusterSize(c, replicas+1, 10*time.Minute)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1)) By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))
err = resizeRC(c, ns, name, replicas+1) err = resizeRC(c, ns, name, replicas+1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, name, true, replicas+1) err = framework.VerifyPods(c, ns, name, true, replicas+1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
KubeDescribe("Network", func() { framework.KubeDescribe("Network", func() {
Context("when a node becomes unreachable", func() { Context("when a node becomes unreachable", func() {
BeforeEach(func() { BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
}) })
// TODO marekbiskup 2015-06-19 #10085 // TODO marekbiskup 2015-06-19 #10085
@ -468,9 +469,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net" name := "my-hostname-net"
newSVCByName(c, ns, name) newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes replicas := framework.TestContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas) newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas) err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding") Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("choose a node with at least one pod - we will block some network traffic on this node") By("choose a node with at least one pod - we will block some network traffic on this node")
@ -485,9 +486,9 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
By(fmt.Sprintf("block network traffic from node %s", node.Name)) By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node) performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !waitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
// sleep a bit, to allow Watch in NodeController to catch up. // sleep a bit, to allow Watch in NodeController to catch up.
@ -499,7 +500,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
additionalPod := "additionalpod" additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name) err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, additionalPod, true, 1) err = framework.VerifyPods(c, ns, additionalPod, true, 1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// verify that it is really on the requested node // verify that it is really on the requested node
@ -507,7 +508,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
pod, err := c.Pods(ns).Get(additionalPod) pod, err := c.Pods(ns).Get(additionalPod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name { if pod.Spec.NodeName != node.Name {
Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
} }
} }
}) })
@ -525,8 +526,8 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
nodeOpts := api.ListOptions{} nodeOpts := api.ListOptions{}
nodes, err := c.Nodes().List(nodeOpts) nodes, err := c.Nodes().List(nodeOpts)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
filterNodes(nodes, func(node api.Node) bool { framework.FilterNodes(nodes, func(node api.Node) bool {
if !isNodeConditionSetAsExpected(&node, api.NodeReady, true) { if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
return false return false
} }
podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)}
@ -537,12 +538,12 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
return true return true
}) })
if len(nodes.Items) <= 0 { if len(nodes.Items) <= 0 {
Failf("No eligible node were found: %d", len(nodes.Items)) framework.Failf("No eligible node were found: %d", len(nodes.Items))
} }
node := nodes.Items[0] node := nodes.Items[0]
podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)}
if err = waitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, podRunningReady); err != nil { if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, framework.PodRunningReady); err != nil {
Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
By("Set up watch on node status") By("Set up watch on node status")
@ -554,11 +555,11 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector
return framework.Client.Nodes().List(options) return f.Client.Nodes().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector
return framework.Client.Nodes().Watch(options) return f.Client.Nodes().Watch(options)
}, },
}, },
&api.Node{}, &api.Node{},
@ -585,7 +586,7 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
master := getMaster(c) master := getMaster(c)
defer func() { defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
unblockNetwork(host, master) framework.UnblockNetwork(host, master)
if CurrentGinkgoTestDescription().Failed { if CurrentGinkgoTestDescription().Failed {
return return
@ -593,17 +594,17 @@ var _ = KubeDescribe("Nodes [Disruptive]", func() {
By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
expectNodeReadiness(true, newNode) expectNodeReadiness(true, newNode)
if err = waitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, podRunningReady); err != nil { if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, framework.PodRunningReady); err != nil {
Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
}() }()
blockNetwork(host, master) framework.BlockNetwork(host, master)
By("Expect to observe node and pod status change from Ready to NotReady after network partition") By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode) expectNodeReadiness(false, newNode)
if err = waitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, podNotReady); err != nil { if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, framework.PodNotReady); err != nil {
Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
} }
}) })
}) })
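
Taken together, the partition flow above follows one pattern: block traffic with framework.BlockNetwork, wait for the node to go NotReady with framework.WaitForNodeToBe, and always restore connectivity with framework.UnblockNetwork in a deferred call so a failed assertion cannot leave the cluster partitioned. A condensed sketch of that pattern (the function name and explicit timeout parameters are placeholders, not framework helpers):

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

// simulatePartition blocks traffic between host and master, waits for the node
// to be reported NotReady, runs check, and always restores connectivity before
// returning, even if check fails.
func simulatePartition(c *client.Client, node *api.Node, host, master string,
	notReadyTimeout, readyTimeout time.Duration, check func()) {
	defer func() {
		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
		framework.UnblockNetwork(host, master)
		if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, readyTimeout) {
			framework.Failf("Node %s did not become ready within %v", node.Name, readyTimeout)
		}
	}()

	framework.BlockNetwork(host, master)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, notReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, notReadyTimeout)
	}
	check()
}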
View File
@ -25,6 +25,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -35,8 +36,8 @@ const (
resourceQuotaTimeout = 30 * time.Second resourceQuotaTimeout = 30 * time.Second
) )
var _ = KubeDescribe("ResourceQuota", func() { var _ = framework.KubeDescribe("ResourceQuota", func() {
f := NewDefaultFramework("resourcequota") f := framework.NewDefaultFramework("resourcequota")
It("should create a ResourceQuota and ensure its status is promptly calculated.", func() { It("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
By("Creating a ResourceQuota") By("Creating a ResourceQuota")
@ -712,7 +713,7 @@ func deleteResourceQuota(c *client.Client, namespace, name string) error {
// wait for resource quota status to show the expected used resources value // wait for resource quota status to show the expected used resources value
func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error { func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error {
return wait.Poll(poll, resourceQuotaTimeout, func() (bool, error) { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName) resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName)
if err != nil { if err != nil {
return false, err return false, err
@ -724,7 +725,7 @@ func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.Resou
// verify that the quota shows the expected used resource values // verify that the quota shows the expected used resource values
for k, v := range used { for k, v := range used {
if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) { if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String()) framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
return false, nil return false, nil
} }
} }
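
The waitForResourceQuota hunk relies on the suite's general polling idiom: wait.Poll driven by the shared framework.Poll interval and a closure returning (done, error). A hedged, generic sketch of the same idiom (waitForQuotaUsed and its condition are illustrative, not part of the framework):

import (
	"time"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForQuotaUsed polls with the shared framework.Poll interval until the
// named ResourceQuota reports at least one used value, or timeout elapses.
func waitForQuotaUsed(c *client.Client, ns, quotaName string, timeout time.Duration) error {
	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
		quota, err := c.ResourceQuotas(ns).Get(quotaName)
		if err != nil {
			return false, err // a hard error stops the poll immediately
		}
		if len(quota.Status.Used) == 0 {
			framework.Logf("quota %s/%s reports no usage yet", ns, quotaName)
			return false, nil // keep polling
		}
		return true, nil
	})
}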
View File
@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -36,7 +37,7 @@ const (
// restart all nodes will be this number times the number of nodes.) // restart all nodes will be this number times the number of nodes.)
restartPerNodeTimeout = 5 * time.Minute restartPerNodeTimeout = 5 * time.Minute
// How often to poll the statues of a restart. // How often to framework.Poll the statues of a restart.
restartPoll = 20 * time.Second restartPoll = 20 * time.Second
// How long a node is allowed to become "Ready" after it is restarted before // How long a node is allowed to become "Ready" after it is restarted before
@ -48,16 +49,16 @@ const (
restartPodReadyAgainTimeout = 5 * time.Minute restartPodReadyAgainTimeout = 5 * time.Minute
) )
var _ = KubeDescribe("Restart [Disruptive]", func() { var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
f := NewDefaultFramework("restart") f := framework.NewDefaultFramework("restart")
var ps *podStore var ps *framework.PodStore
BeforeEach(func() { BeforeEach(func() {
// This test requires the ability to restart all nodes, so the provider // This test requires the ability to restart all nodes, so the provider
// check must be identical to that call. // check must be identical to that call.
SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
ps = newPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything()) ps = framework.NewPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything())
}) })
AfterEach(func() { AfterEach(func() {
@ -67,12 +68,12 @@ var _ = KubeDescribe("Restart [Disruptive]", func() {
}) })
It("should restart all nodes and ensure all nodes and pods recover", func() { It("should restart all nodes and ensure all nodes and pods recover", func() {
nn := testContext.CloudConfig.NumNodes nn := framework.TestContext.CloudConfig.NumNodes
By("ensuring all nodes are ready") By("ensuring all nodes are ready")
nodeNamesBefore, err := checkNodesReady(f.Client, nodeReadyInitialTimeout, nn) nodeNamesBefore, err := checkNodesReady(f.Client, framework.NodeReadyInitialTimeout, nn)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("Got the following nodes before restart: %v", nodeNamesBefore) framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore)
By("ensuring all pods are running and ready") By("ensuring all pods are running and ready")
pods := ps.List() pods := ps.List()
@ -81,24 +82,24 @@ var _ = KubeDescribe("Restart [Disruptive]", func() {
podNamesBefore[i] = p.ObjectMeta.Name podNamesBefore[i] = p.ObjectMeta.Name
} }
ns := api.NamespaceSystem ns := api.NamespaceSystem
if !checkPodsRunningReady(f.Client, ns, podNamesBefore, podReadyBeforeTimeout) { if !framework.CheckPodsRunningReady(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
Failf("At least one pod wasn't running and ready at test start.") framework.Failf("At least one pod wasn't running and ready at test start.")
} }
By("restarting all of the nodes") By("restarting all of the nodes")
err = restartNodes(testContext.Provider, restartPerNodeTimeout) err = restartNodes(framework.TestContext.Provider, restartPerNodeTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("ensuring all nodes are ready after the restart") By("ensuring all nodes are ready after the restart")
nodeNamesAfter, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, nn) nodeNamesAfter, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, nn)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Logf("Got the following nodes after restart: %v", nodeNamesAfter) framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter)
// Make sure that we have the same number of nodes. We're not checking // Make sure that we have the same number of nodes. We're not checking
// that the names match because that's implementation specific. // that the names match because that's implementation specific.
By("ensuring the same number of nodes exist after the restart") By("ensuring the same number of nodes exist after the restart")
if len(nodeNamesBefore) != len(nodeNamesAfter) { if len(nodeNamesBefore) != len(nodeNamesAfter) {
Failf("Had %d nodes before nodes were restarted, but now only have %d", framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
len(nodeNamesBefore), len(nodeNamesAfter)) len(nodeNamesBefore), len(nodeNamesAfter))
} }
@ -110,23 +111,23 @@ var _ = KubeDescribe("Restart [Disruptive]", func() {
podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout) podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart) remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
if !checkPodsRunningReady(f.Client, ns, podNamesAfter, remaining) { if !framework.CheckPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
Failf("At least one pod wasn't running and ready after the restart.") framework.Failf("At least one pod wasn't running and ready after the restart.")
} }
}) })
}) })
// waitForNPods tries to list pods using c until it finds expect of them, // waitForNPods tries to list pods using c until it finds expect of them,
// returning their names if it can do so before timeout. // returning their names if it can do so before timeout.
func waitForNPods(ps *podStore, expect int, timeout time.Duration) ([]string, error) { func waitForNPods(ps *framework.PodStore, expect int, timeout time.Duration) ([]string, error) {
// Loop until we find expect pods or timeout is passed. // Loop until we find expect pods or timeout is passed.
var pods []*api.Pod var pods []*api.Pod
var errLast error var errLast error
found := wait.Poll(poll, timeout, func() (bool, error) { found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
pods = ps.List() pods = ps.List()
if len(pods) != expect { if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
Logf("Error getting pods: %v", errLast) framework.Logf("Error getting pods: %v", errLast)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -151,7 +152,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
var nodeList *api.NodeList var nodeList *api.NodeList
var errLast error var errLast error
start := time.Now() start := time.Now()
found := wait.Poll(poll, nt, func() (bool, error) { found := wait.Poll(framework.Poll, nt, func() (bool, error) {
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call // knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes. // until we get the expected number of nodes.
@ -163,7 +164,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
if len(nodeList.Items) != expect { if len(nodeList.Items) != expect {
errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)", errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
expect, len(nodeList.Items), time.Since(start)) expect, len(nodeList.Items), time.Since(start))
Logf("%v", errLast) framework.Logf("%v", errLast)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -176,7 +177,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v", return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
expect, nt, errLast) expect, nt, errLast)
} }
Logf("Successfully found %d nodes", expect) framework.Logf("Successfully found %d nodes", expect)
// Next, ensure in parallel that all the nodes are ready. We subtract the // Next, ensure in parallel that all the nodes are ready. We subtract the
// time we spent waiting above. // time we spent waiting above.
@ -184,7 +185,7 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
result := make(chan bool, len(nodeList.Items)) result := make(chan bool, len(nodeList.Items))
for _, n := range nodeNames { for _, n := range nodeNames {
n := n n := n
go func() { result <- waitForNodeToBeReady(c, n, timeout) }() go func() { result <- framework.WaitForNodeToBeReady(c, n, timeout) }()
} }
failed := false failed := false
// TODO(mbforbes): Change to `for range` syntax once we support only Go // TODO(mbforbes): Change to `for range` syntax once we support only Go
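
The tail of checkNodesReady fans out one goroutine per node, each pushing the result of framework.WaitForNodeToBeReady into a buffered channel that the caller then drains. A self-contained sketch of that fan-out pattern (allNodesReady is an illustrative name, not a framework helper):

import (
	"time"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// allNodesReady waits on every named node in parallel and reports whether all
// of them became Ready within timeout.
func allNodesReady(c *client.Client, nodeNames []string, timeout time.Duration) bool {
	result := make(chan bool, len(nodeNames)) // buffered: senders never block
	for _, n := range nodeNames {
		n := n // capture the loop variable for the goroutine
		go func() { result <- framework.WaitForNodeToBeReady(c, n, timeout) }()
	}
	allReady := true
	for range nodeNames {
		if !<-result {
			allReady = false
		}
	}
	return allReady
}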
View File
@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -63,7 +64,7 @@ func getRequestedCPU(pod api.Pod) int64 {
func verifyResult(c *client.Client, podName string, ns string) { func verifyResult(c *client.Client, podName string, ns string) {
allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
scheduledPods, notScheduledPods := getPodsScheduled(allPods) scheduledPods, notScheduledPods := getPodsScheduled(allPods)
selector := fields.Set{ selector := fields.Set{
@ -75,7 +76,7 @@ func verifyResult(c *client.Client, podName string, ns string) {
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options) schedEvents, err := c.Events(ns).List(options)
expectNoError(err) framework.ExpectNoError(err)
// If we failed to find event with a capitalized first letter of reason // If we failed to find event with a capitalized first letter of reason
// try looking for one starting with a small one for backward compatibility. // try looking for one starting with a small one for backward compatibility.
// If we don't do it we end up in #15806. // If we don't do it we end up in #15806.
@ -90,7 +91,7 @@ func verifyResult(c *client.Client, podName string, ns string) {
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err = c.Events(ns).List(options) schedEvents, err = c.Events(ns).List(options)
expectNoError(err) framework.ExpectNoError(err)
} }
printed := false printed := false
@ -110,10 +111,10 @@ func verifyResult(c *client.Client, podName string, ns string) {
func cleanupPods(c *client.Client, ns string) { func cleanupPods(c *client.Client, ns string) {
By("Removing all pods in namespace " + ns) By("Removing all pods in namespace " + ns)
pods, err := c.Pods(ns).List(api.ListOptions{}) pods, err := c.Pods(ns).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
opt := api.NewDeleteOptions(0) opt := api.NewDeleteOptions(0)
for _, p := range pods.Items { for _, p := range pods.Items {
expectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt)) framework.ExpectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt))
} }
} }
@ -123,24 +124,24 @@ func waitForStableCluster(c *client.Client) int {
startTime := time.Now() startTime := time.Now()
allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods) scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods)
for len(currentlyNotScheduledPods) != 0 { for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods) scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods)
if startTime.Add(timeout).Before(time.Now()) { if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout) framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
break break
} }
} }
return len(scheduledPods) return len(scheduledPods)
} }
var _ = KubeDescribe("SchedulerPredicates [Serial]", func() { var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
var c *client.Client var c *client.Client
var nodeList *api.NodeList var nodeList *api.NodeList
var systemPodsNo int var systemPodsNo int
@ -152,16 +153,16 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
rc, err := c.ReplicationControllers(ns).Get(RCName) rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 { if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller") By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName) err := framework.DeleteRC(c, ns, RCName)
expectNoError(err) framework.ExpectNoError(err)
} }
}) })
framework := NewDefaultFramework("sched-pred") f := framework.NewDefaultFramework("sched-pred")
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
nodeList = &api.NodeList{} nodeList = &api.NodeList{}
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Nodes().List(api.ListOptions{})
masterNodes = sets.NewString() masterNodes = sets.NewString()
@ -173,8 +174,8 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
} }
} }
err = checkTestingNSDeletedExcept(c, ns) err = framework.CheckTestingNSDeletedExcept(c, ns)
expectNoError(err) framework.ExpectNoError(err)
// Every test case in this suite assumes that cluster add-on pods stay stable and // Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods. // cannot be run in parallel with any other test that touches Nodes or Pods.
@ -188,12 +189,12 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
} }
} }
err = waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout) err = framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
PrintAllKubeletPods(c, node.Name) framework.PrintAllKubeletPods(c, node.Name)
} }
}) })
@ -207,7 +208,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
totalPodCapacity = 0 totalPodCapacity = 0
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
Logf("Node: %v", node) framework.Logf("Node: %v", node)
podCapacity, found := node.Status.Capacity["pods"] podCapacity, found := node.Status.Capacity["pods"]
Expect(found).To(Equal(true)) Expect(found).To(Equal(true))
totalPodCapacity += podCapacity.Value() totalPodCapacity += podCapacity.Value()
@ -218,7 +219,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
startPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{ framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
}, },
@ -254,10 +255,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
// Wait a bit to allow scheduler to do its thing // Wait a bit to allow scheduler to do its thing
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns) verifyResult(c, podName, ns)
@ -277,11 +278,11 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
waitForStableCluster(c) waitForStableCluster(c)
pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
expectNoError(err) framework.ExpectNoError(err)
for _, pod := range pods.Items { for _, pod := range pods.Items {
_, found := nodeToCapacityMap[pod.Spec.NodeName] _, found := nodeToCapacityMap[pod.Spec.NodeName]
if found && pod.Status.Phase == api.PodRunning { if found && pod.Status.Phase == api.PodRunning {
Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) framework.Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod) nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
} }
} }
@ -289,13 +290,13 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
var podsNeededForSaturation int var podsNeededForSaturation int
milliCpuPerPod := int64(500) milliCpuPerPod := int64(500)
for name, leftCapacity := range nodeToCapacityMap { for name, leftCapacity := range nodeToCapacityMap {
Logf("Node: %v has capacity: %v", name, leftCapacity) framework.Logf("Node: %v has capacity: %v", name, leftCapacity)
podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod) podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod)
} }
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation)) By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))
startPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{ framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
}, },
@ -344,10 +345,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
// Wait a bit to allow scheduler to do its thing // Wait a bit to allow scheduler to do its thing
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns) verifyResult(c, podName, ns)
@ -382,10 +383,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
// Wait a bit to allow scheduler to do its thing // Wait a bit to allow scheduler to do its thing
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns) verifyResult(c, podName, ns)
@ -424,12 +425,12 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}) })
if err == nil || !errors.IsInvalid(err) { if err == nil || !errors.IsInvalid(err) {
Failf("Expect error of invalid, got : %v", err) framework.Failf("Expect error of invalid, got : %v", err)
} }
// Wait a bit to allow scheduler to do its thing if the pod is not rejected. // Wait a bit to allow scheduler to do its thing if the pod is not rejected.
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
cleanupPods(c, ns) cleanupPods(c, ns)
@ -458,24 +459,24 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForPodRunningInNamespace(c, podName, ns)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName) pod, err := c.Pods(ns).Get(podName)
expectNoError(err) framework.ExpectNoError(err)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
expectNoError(err) framework.ExpectNoError(err)
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID())) k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID()))
v := "42" v := "42"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
expectNoError(err) framework.ExpectNoError(err)
node, err := c.Nodes().Get(nodeName) node, err := c.Nodes().Get(nodeName)
expectNoError(err) framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v)) Expect(node.Labels[k]).To(Equal(v))
By("Trying to relaunch the pod, now with labels.") By("Trying to relaunch the pod, now with labels.")
@ -500,7 +501,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
// check that pod got scheduled. We intentionally DO NOT check that the // check that pod got scheduled. We intentionally DO NOT check that the
@ -508,9 +509,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod // kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The // already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod. // kubelet will then refuse to launch the pod.
expectNoError(waitForPodNotPending(c, ns, labelPodName)) framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName) labelPod, err := c.Pods(ns).Get(labelPodName)
expectNoError(err) framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
}) })
@ -560,10 +561,10 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
// Wait a bit to allow scheduler to do its thing // Wait a bit to allow scheduler to do its thing
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns) verifyResult(c, podName, ns)
@ -595,24 +596,24 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForPodRunningInNamespace(c, podName, ns)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName) pod, err := c.Pods(ns).Get(podName)
expectNoError(err) framework.ExpectNoError(err)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
expectNoError(err) framework.ExpectNoError(err)
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID())) k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID()))
v := "42" v := "42"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
expectNoError(err) framework.ExpectNoError(err)
node, err := c.Nodes().Get(nodeName) node, err := c.Nodes().Get(nodeName)
expectNoError(err) framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v)) Expect(node.Labels[k]).To(Equal(v))
By("Trying to relaunch the pod, now with labels.") By("Trying to relaunch the pod, now with labels.")
@ -651,7 +652,7 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
// check that pod got scheduled. We intentionally DO NOT check that the // check that pod got scheduled. We intentionally DO NOT check that the
@ -659,9 +660,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod // kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The // already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod. // kubelet will then refuse to launch the pod.
expectNoError(waitForPodNotPending(c, ns, labelPodName)) framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName) labelPod, err := c.Pods(ns).Get(labelPodName)
expectNoError(err) framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
}) })
@ -689,31 +690,31 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
}, },
}, },
}) })
expectNoError(err) framework.ExpectNoError(err)
expectNoError(waitForPodRunningInNamespace(c, podName, ns)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName) pod, err := c.Pods(ns).Get(podName)
expectNoError(err) framework.ExpectNoError(err)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
expectNoError(err) framework.ExpectNoError(err)
By("Trying to apply a label with fake az info on the found node.") By("Trying to apply a label with fake az info on the found node.")
k := "kubernetes.io/e2e-az-name" k := "kubernetes.io/e2e-az-name"
v := "e2e-az1" v := "e2e-az1"
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
expectNoError(err) framework.ExpectNoError(err)
node, err := c.Nodes().Get(nodeName) node, err := c.Nodes().Get(nodeName)
expectNoError(err) framework.ExpectNoError(err)
Expect(node.Labels[k]).To(Equal(v)) Expect(node.Labels[k]).To(Equal(v))
By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.") By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.")
labelPodName := "with-labels" labelPodName := "with-labels"
nodeSelectionRoot := filepath.Join(testContext.RepoRoot, "docs/user-guide/node-selection") nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/node-selection")
testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml") testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml")
runKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns)) framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns))
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
// check that pod got scheduled. We intentionally DO NOT check that the // check that pod got scheduled. We intentionally DO NOT check that the
@ -721,9 +722,9 @@ var _ = KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod // kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The // already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod. // kubelet will then refuse to launch the pod.
expectNoError(waitForPodNotPending(c, ns, labelPodName)) framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName) labelPod, err := c.Pods(ns).Get(labelPodName)
expectNoError(err) framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
}) })
}) })
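
The scheduling hunks repeat a single flow several times: patch a label onto a chosen node, relaunch the pod with a matching selector or affinity, then use framework.WaitForPodNotPending and a Get to confirm where it landed. A compact sketch of that flow (labelNode and expectPodOnNode are illustrative helper names, not framework functions):

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/gomega"
)

// labelNode patches key=value onto nodeName and verifies the label stuck.
func labelNode(c *client.Client, nodeName, key, value string) {
	patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, key, value)
	err := c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).
		Body([]byte(patch)).Do().Error()
	framework.ExpectNoError(err)

	node, err := c.Nodes().Get(nodeName)
	framework.ExpectNoError(err)
	Expect(node.Labels[key]).To(Equal(value))
}

// expectPodOnNode waits for podName to leave Pending and checks its placement.
func expectPodOnNode(c *client.Client, ns, podName, nodeName string) {
	framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podName))
	pod, err := c.Pods(ns).Get(podName)
	framework.ExpectNoError(err)
	Expect(pod.Spec.NodeName).To(Equal(nodeName))
}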
View File
@ -21,12 +21,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
var _ = KubeDescribe("Secrets", func() { var _ = framework.KubeDescribe("Secrets", func() {
f := NewDefaultFramework("secrets") f := framework.NewDefaultFramework("secrets")
It("should be consumable from pods in volume [Conformance]", func() { It("should be consumable from pods in volume [Conformance]", func() {
name := "secret-test-" + string(util.NewUUID()) name := "secret-test-" + string(util.NewUUID())
@ -49,12 +50,12 @@ var _ = KubeDescribe("Secrets", func() {
defer func() { defer func() {
By("Cleaning up the secret") By("Cleaning up the secret")
if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil {
Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
}() }()
var err error var err error
if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -92,7 +93,7 @@ var _ = KubeDescribe("Secrets", func() {
}, },
} }
testContainerOutput("consume secrets", f.Client, pod, 0, []string{ framework.TestContainerOutput("consume secrets", f.Client, pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1", "content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -r--r--r--", "mode of file \"/etc/secret-volume/data-1\": -r--r--r--",
}, f.Namespace.Name) }, f.Namespace.Name)
@ -115,12 +116,12 @@ var _ = KubeDescribe("Secrets", func() {
defer func() { defer func() {
By("Cleaning up the secret") By("Cleaning up the secret")
if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil {
Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
}() }()
var err error var err error
if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &api.Pod{
@ -152,7 +153,7 @@ var _ = KubeDescribe("Secrets", func() {
}, },
} }
testContainerOutput("consume secrets", f.Client, pod, 0, []string{ framework.TestContainerOutput("consume secrets", f.Client, pod, 0, []string{
"SECRET_DATA=value-1", "SECRET_DATA=value-1",
}, f.Namespace.Name) }, f.Namespace.Name)
}) })
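
The secrets hunks use the package-level framework.TestContainerOutput(description, client, pod, containerIndex, expectedStrings, namespace) form, which creates the pod and asserts that the named container's output contains each expected string. A hedged usage sketch (the pod spec, image, and echoed value are placeholders, not taken from the test):

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Container output sketch", func() {
	f := framework.NewDefaultFramework("output-sketch")

	It("should match expected strings in container output", func() {
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "output-test-pod"},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:    "output-test",
					Image:   "gcr.io/google_containers/busybox", // placeholder image
					Command: []string{"sh", "-c", "echo SECRET_DATA=value-1"},
				}},
				RestartPolicy: api.RestartPolicyNever,
			},
		}
		// Package-level form: client and namespace are passed explicitly.
		framework.TestContainerOutput("echo check", f.Client, pod, 0, []string{
			"SECRET_DATA=value-1",
		}, f.Namespace.Name)
	})
})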
View File
@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -57,15 +58,15 @@ func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
return pod return pod
} }
var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() { var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", func() {
framework := NewDefaultFramework("security-context") f := framework.NewDefaultFramework("security-context")
It("should support pod.Spec.SecurityContext.SupplementalGroups", func() { It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
pod := scTestPod(false, false) pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.Containers[0].Command = []string{"id", "-G"}
pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
groups := []string{"1234", "5678"} groups := []string{"1234", "5678"}
framework.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
}) })
It("should support pod.Spec.SecurityContext.RunAsUser", func() { It("should support pod.Spec.SecurityContext.RunAsUser", func() {
@ -74,7 +75,7 @@ var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() {
pod.Spec.SecurityContext.RunAsUser = &uid pod.Spec.SecurityContext.RunAsUser = &uid
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"} pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
framework.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
fmt.Sprintf("%v", uid), fmt.Sprintf("%v", uid),
}) })
}) })
@ -88,26 +89,26 @@ var _ = KubeDescribe("Security Context [Feature:SecurityContext]", func() {
pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"} pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
framework.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
fmt.Sprintf("%v", overrideUid), fmt.Sprintf("%v", overrideUid),
}) })
}) })
It("should support volume SELinux relabeling", func() { It("should support volume SELinux relabeling", func() {
testPodSELinuxLabeling(framework, false, false) testPodSELinuxLabeling(f, false, false)
}) })
It("should support volume SELinux relabeling when using hostIPC", func() { It("should support volume SELinux relabeling when using hostIPC", func() {
testPodSELinuxLabeling(framework, true, false) testPodSELinuxLabeling(f, true, false)
}) })
It("should support volume SELinux relabeling when using hostPID", func() { It("should support volume SELinux relabeling when using hostPID", func() {
testPodSELinuxLabeling(framework, false, true) testPodSELinuxLabeling(f, false, true)
}) })
}) })
func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) { func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) {
// Write and read a file with an empty_dir volume // Write and read a file with an empty_dir volume
// with a pod with the MCS label s0:c0,c1 // with a pod with the MCS label s0:c0,c1
pod := scTestPod(hostIPC, hostPID) pod := scTestPod(hostIPC, hostPID)
@ -134,28 +135,28 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) {
} }
pod.Spec.Containers[0].Command = []string{"sleep", "6000"} pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
client := framework.Client.Pods(framework.Namespace.Name) client := f.Client.Pods(f.Namespace.Name)
_, err := client.Create(pod) _, err := client.Create(pod)
expectNoError(err, "Error creating pod %v", pod) framework.ExpectNoError(err, "Error creating pod %v", pod)
defer client.Delete(pod.Name, nil) defer client.Delete(pod.Name, nil)
expectNoError(waitForPodRunningInNamespace(framework.Client, pod.Name, framework.Namespace.Name)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
testContent := "hello" testContent := "hello"
testFilePath := mountPath + "/TEST" testFilePath := mountPath + "/TEST"
err = framework.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent) err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
Expect(err).To(BeNil()) Expect(err).To(BeNil())
content, err := framework.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath) content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
Expect(err).To(BeNil()) Expect(err).To(BeNil())
Expect(content).To(ContainSubstring(testContent)) Expect(content).To(ContainSubstring(testContent))
foundPod, err := framework.Client.Pods(framework.Namespace.Name).Get(pod.Name) foundPod, err := f.Client.Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Confirm that the file can be accessed from a second // Confirm that the file can be accessed from a second
// pod using host_path with the same MCS label // pod using host_path with the same MCS label
volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", testContext.KubeVolumeDir, foundPod.UID, volumeName) volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName)
By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", testContext.KubeVolumeDir)) By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID) pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName pod.Spec.NodeName = foundPod.Spec.NodeName
volumeMounts := []api.VolumeMount{ volumeMounts := []api.VolumeMount{
@ -181,7 +182,7 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) {
Level: "s0:c0,c1", Level: "s0:c0,c1",
} }
framework.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent}) f.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent})
// Confirm that the same pod with a different MCS // Confirm that the same pod with a different MCS
// label cannot access the volume // label cannot access the volume
pod = scTestPod(hostIPC, hostPID) pod = scTestPod(hostIPC, hostPID)
@ -192,12 +193,12 @@ func testPodSELinuxLabeling(framework *Framework, hostIPC bool, hostPID bool) {
Level: "s0:c2,c3", Level: "s0:c2,c3",
} }
_, err = client.Create(pod) _, err = client.Create(pod)
expectNoError(err, "Error creating pod %v", pod) framework.ExpectNoError(err, "Error creating pod %v", pod)
defer client.Delete(pod.Name, nil) defer client.Delete(pod.Name, nil)
err = framework.WaitForPodRunning(pod.Name) err = f.WaitForPodRunning(pod.Name)
expectNoError(err, "Error waiting for pod to run %v", pod) framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)
content, err = framework.ReadFileViaContainer(pod.Name, "test-container", testFilePath) content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
Expect(content).NotTo(ContainSubstring(testContent)) Expect(content).NotTo(ContainSubstring(testContent))
} }
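The hunks above all follow one pattern: helpers that used to be reachable unqualified (Logf, Failf, expectNoError, NewDefaultFramework) now live in the imported framework package, so the local test-framework variable is renamed from framework to f to avoid shadowing the package name. A minimal sketch of the resulting shape, using a hypothetical suite and pod name rather than any file from this commit:

package e2e

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical suite; the shape mirrors the refactored tests above.
var _ = framework.KubeDescribe("Example", func() {
	// The local variable is "f" now, since "framework" names the package.
	f := framework.NewDefaultFramework("example")

	It("uses package-qualified helpers", func() {
		pod, err := f.Client.Pods(f.Namespace.Name).Get("example-pod")
		framework.ExpectNoError(err, "Error getting pod %v", pod)
		framework.Logf("Got pod %q", pod.Name)
	})
})

Only the qualifier and the variable name change; the Ginkgo structure and the helper signatures stay the same.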

File diff suppressed because it is too large


@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -33,29 +34,29 @@ import (
var serviceAccountTokenNamespaceVersion = version.MustParse("v1.2.0") var serviceAccountTokenNamespaceVersion = version.MustParse("v1.2.0")
var _ = KubeDescribe("ServiceAccounts", func() { var _ = framework.KubeDescribe("ServiceAccounts", func() {
f := NewDefaultFramework("svcaccounts") f := framework.NewDefaultFramework("svcaccounts")
It("should ensure a single API token exists", func() { It("should ensure a single API token exists", func() {
// wait for the service account to reference a single secret // wait for the service account to reference a single secret
var secrets []api.ObjectReference var secrets []api.ObjectReference
expectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference") By("waiting for a single token reference")
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
Logf("default service account was not found") framework.Logf("default service account was not found")
return false, nil return false, nil
} }
if err != nil { if err != nil {
Logf("error getting default service account: %v", err) framework.Logf("error getting default service account: %v", err)
return false, err return false, err
} }
switch len(sa.Secrets) { switch len(sa.Secrets) {
case 0: case 0:
Logf("default service account has no secret references") framework.Logf("default service account has no secret references")
return false, nil return false, nil
case 1: case 1:
Logf("default service account has a single secret reference") framework.Logf("default service account has a single secret reference")
secrets = sa.Secrets secrets = sa.Secrets
return true, nil return true, nil
default: default:
@ -68,32 +69,32 @@ var _ = KubeDescribe("ServiceAccounts", func() {
By("ensuring the single token reference persists") By("ensuring the single token reference persists")
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
expectNoError(err) framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets)) Expect(sa.Secrets).To(Equal(secrets))
} }
// delete the referenced secret // delete the referenced secret
By("deleting the service account token") By("deleting the service account token")
expectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name)) framework.ExpectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name))
// wait for the referenced secret to be removed, and another one autocreated // wait for the referenced secret to be removed, and another one autocreated
expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token reference") By("waiting for a new token reference")
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
if err != nil { if err != nil {
Logf("error getting default service account: %v", err) framework.Logf("error getting default service account: %v", err)
return false, err return false, err
} }
switch len(sa.Secrets) { switch len(sa.Secrets) {
case 0: case 0:
Logf("default service account has no secret references") framework.Logf("default service account has no secret references")
return false, nil return false, nil
case 1: case 1:
if sa.Secrets[0] == secrets[0] { if sa.Secrets[0] == secrets[0] {
Logf("default service account still has the deleted secret reference") framework.Logf("default service account still has the deleted secret reference")
return false, nil return false, nil
} }
Logf("default service account has a new single secret reference") framework.Logf("default service account has a new single secret reference")
secrets = sa.Secrets secrets = sa.Secrets
return true, nil return true, nil
default: default:
@ -106,7 +107,7 @@ var _ = KubeDescribe("ServiceAccounts", func() {
By("ensuring the single token reference persists") By("ensuring the single token reference persists")
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
expectNoError(err) framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets)) Expect(sa.Secrets).To(Equal(secrets))
} }
@ -114,26 +115,26 @@ var _ = KubeDescribe("ServiceAccounts", func() {
By("deleting the reference to the service account token") By("deleting the reference to the service account token")
{ {
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
expectNoError(err) framework.ExpectNoError(err)
sa.Secrets = nil sa.Secrets = nil
_, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa) _, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa)
expectNoError(updateErr) framework.ExpectNoError(updateErr)
} }
// wait for another one to be autocreated // wait for another one to be autocreated
expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token to be created and added") By("waiting for a new token to be created and added")
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
if err != nil { if err != nil {
Logf("error getting default service account: %v", err) framework.Logf("error getting default service account: %v", err)
return false, err return false, err
} }
switch len(sa.Secrets) { switch len(sa.Secrets) {
case 0: case 0:
Logf("default service account has no secret references") framework.Logf("default service account has no secret references")
return false, nil return false, nil
case 1: case 1:
Logf("default service account has a new single secret reference") framework.Logf("default service account has a new single secret reference")
secrets = sa.Secrets secrets = sa.Secrets
return true, nil return true, nil
default: default:
@ -146,7 +147,7 @@ var _ = KubeDescribe("ServiceAccounts", func() {
By("ensuring the single token reference persists") By("ensuring the single token reference persists")
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
expectNoError(err) framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets)) Expect(sa.Secrets).To(Equal(secrets))
} }
}) })
@ -156,25 +157,25 @@ var _ = KubeDescribe("ServiceAccounts", func() {
var rootCAContent string var rootCAContent string
// Standard get, update retry loop // Standard get, update retry loop
expectNoError(wait.Poll(time.Millisecond*500, serviceAccountProvisionTimeout, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token") By("getting the auto-created API token")
sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
Logf("default service account was not found") framework.Logf("default service account was not found")
return false, nil return false, nil
} }
if err != nil { if err != nil {
Logf("error getting default service account: %v", err) framework.Logf("error getting default service account: %v", err)
return false, err return false, err
} }
if len(sa.Secrets) == 0 { if len(sa.Secrets) == 0 {
Logf("default service account has no secret references") framework.Logf("default service account has no secret references")
return false, nil return false, nil
} }
for _, secretRef := range sa.Secrets { for _, secretRef := range sa.Secrets {
secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name) secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name)
if err != nil { if err != nil {
Logf("Error getting secret %s: %v", secretRef.Name, err) framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue continue
} }
if secret.Type == api.SecretTypeServiceAccountToken { if secret.Type == api.SecretTypeServiceAccountToken {
@ -184,7 +185,7 @@ var _ = KubeDescribe("ServiceAccounts", func() {
} }
} }
Logf("default service account has no secret references to valid service account tokens") framework.Logf("default service account has no secret references to valid service account tokens")
return false, nil return false, nil
})) }))
@ -213,7 +214,7 @@ var _ = KubeDescribe("ServiceAccounts", func() {
}, },
} }
supportsTokenNamespace, _ := serverVersionGTE(serviceAccountTokenNamespaceVersion, f.Client) supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.Client)
if supportsTokenNamespace { if supportsTokenNamespace {
pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
Name: "namespace-test", Name: "namespace-test",


@ -24,11 +24,12 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework" controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
@ -39,8 +40,8 @@ func (d durations) Len() int { return len(d) }
func (d durations) Less(i, j int) bool { return d[i] < d[j] } func (d durations) Less(i, j int) bool { return d[i] < d[j] }
func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] } func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
var _ = KubeDescribe("Service endpoints latency", func() { var _ = framework.KubeDescribe("Service endpoints latency", func() {
f := NewDefaultFramework("svc-latency") f := framework.NewDefaultFramework("svc-latency")
It("should not be very high [Conformance]", func() { It("should not be very high [Conformance]", func() {
const ( const (
@ -91,14 +92,14 @@ var _ = KubeDescribe("Service endpoints latency", func() {
} }
return dSorted[est] return dSorted[est]
} }
Logf("Latencies: %v", dSorted) framework.Logf("Latencies: %v", dSorted)
p50 := percentile(50) p50 := percentile(50)
p90 := percentile(90) p90 := percentile(90)
p99 := percentile(99) p99 := percentile(99)
Logf("50 %%ile: %v", p50) framework.Logf("50 %%ile: %v", p50)
Logf("90 %%ile: %v", p90) framework.Logf("90 %%ile: %v", p90)
Logf("99 %%ile: %v", p99) framework.Logf("99 %%ile: %v", p99)
Logf("Total sample count: %v", len(dSorted)) framework.Logf("Total sample count: %v", len(dSorted))
if p50 > limitMedian { if p50 > limitMedian {
failing.Insert("Median latency should be less than " + limitMedian.String()) failing.Insert("Median latency should be less than " + limitMedian.String())
@ -114,8 +115,8 @@ var _ = KubeDescribe("Service endpoints latency", func() {
}) })
}) })
func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Duration, err error) { func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
cfg := RCConfig{ cfg := framework.RCConfig{
Client: f.Client, Client: f.Client,
Image: "gcr.io/google_containers/pause:2.0", Image: "gcr.io/google_containers/pause:2.0",
Name: "svc-latency-rc", Name: "svc-latency-rc",
@ -123,10 +124,10 @@ func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Dur
Replicas: 1, Replicas: 1,
PollInterval: time.Second, PollInterval: time.Second,
} }
if err := RunRC(cfg); err != nil { if err := framework.RunRC(cfg); err != nil {
return nil, err return nil, err
} }
defer DeleteRC(f.Client, f.Namespace.Name, cfg.Name) defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name)
// Run a single watcher, to reduce the number of API calls we have to // Run a single watcher, to reduce the number of API calls we have to
// make; this is to minimize the timing error. It's how kube-proxy // make; this is to minimize the timing error. It's how kube-proxy
@ -164,7 +165,7 @@ func runServiceLatencies(f *Framework, inParallel, total int) (output []time.Dur
for i := 0; i < total; i++ { for i := 0; i < total; i++ {
select { select {
case e := <-errs: case e := <-errs:
Logf("Got error: %v", e) framework.Logf("Got error: %v", e)
errCount += 1 errCount += 1
case d := <-durations: case d := <-durations:
output = append(output, d) output = append(output, d)
@ -273,8 +274,8 @@ func (eq *endpointQueries) added(e *api.Endpoints) {
} }
// blocks until it has finished syncing. // blocks until it has finished syncing.
func startEndpointWatcher(f *Framework, q *endpointQueries) { func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := framework.NewInformer( _, controller := controllerframework.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return f.Client.Endpoints(f.Namespace.Name).List(options) return f.Client.Endpoints(f.Namespace.Name).List(options)
@ -285,7 +286,7 @@ func startEndpointWatcher(f *Framework, q *endpointQueries) {
}, },
&api.Endpoints{}, &api.Endpoints{},
0, 0,
framework.ResourceEventHandlerFuncs{ controllerframework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
if e, ok := obj.(*api.Endpoints); ok { if e, ok := obj.(*api.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 { if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
@ -311,7 +312,7 @@ func startEndpointWatcher(f *Framework, q *endpointQueries) {
} }
} }
func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.Duration, error) { func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
// Make a service that points to that pod. // Make a service that points to that pod.
svc := &api.Service{ svc := &api.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -329,7 +330,7 @@ func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.D
if err != nil { if err != nil {
return 0, err return 0, err
} }
Logf("Created: %v", gotSvc.Name) framework.Logf("Created: %v", gotSvc.Name)
defer f.Client.Services(gotSvc.Namespace).Delete(gotSvc.Name) defer f.Client.Services(gotSvc.Namespace).Delete(gotSvc.Name)
if e := q.request(gotSvc.Name); e == nil { if e := q.request(gotSvc.Name); e == nil {
@ -337,6 +338,6 @@ func singleServiceLatency(f *Framework, name string, q *endpointQueries) (time.D
} }
stopTime := time.Now() stopTime := time.Now()
d := stopTime.Sub(startTime) d := stopTime.Sub(startTime)
Logf("Got endpoints: %v [%v]", gotSvc.Name, d) framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
return d, nil return d, nil
} }
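In this file the refactor also has to dodge an import collision: k8s.io/kubernetes/pkg/controller/framework was previously imported as framework, so it is re-imported under the controllerframework alias and the e2e test framework keeps the plain name. A small sketch of the aliased-import layout, with a hypothetical helper (not part of this commit) to show which qualifier reaches which package:

package e2e

import (
	// Informer machinery goes through the alias; e2e utilities keep "framework".
	controllerframework "k8s.io/kubernetes/pkg/controller/framework"

	"k8s.io/kubernetes/test/e2e/framework"
)

// describeWatcher is a hypothetical helper illustrating the two qualifiers.
func describeWatcher(f *framework.Framework) {
	// Both names now resolve without ambiguity.
	_ = controllerframework.ResourceEventHandlerFuncs{}
	framework.Logf("would watch endpoints in namespace %v", f.Namespace.Name)
}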


@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
utilyaml "k8s.io/kubernetes/pkg/util/yaml" utilyaml "k8s.io/kubernetes/pkg/util/yaml"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -95,14 +96,14 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
for i, c := range rc.Spec.Template.Spec.Containers { for i, c := range rc.Spec.Template.Spec.Containers {
rc.Spec.Template.Spec.Containers[i].Args = append( rc.Spec.Template.Spec.Containers[i].Args = append(
c.Args, fmt.Sprintf("--namespace=%v", namespace)) c.Args, fmt.Sprintf("--namespace=%v", namespace))
Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args) framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
} }
rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc) rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc)
if err != nil { if err != nil {
return return
} }
if err = waitForRCPodsRunning(h.client, namespace, rc.Name); err != nil { if err = framework.WaitForRCPodsRunning(h.client, namespace, rc.Name); err != nil {
return return
} }
h.rcName = rc.Name h.rcName = rc.Name
@ -119,10 +120,10 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
// Find the external addresses of the nodes the pods are running on. // Find the external addresses of the nodes the pods are running on.
for _, p := range pods.Items { for _, p := range pods.Items {
wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) { wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) {
address, err := getHostExternalAddress(h.client, &p) address, err := framework.GetHostExternalAddress(h.client, &p)
if err != nil { if err != nil {
Logf("%v", err) framework.Logf("%v", err)
return false, nil return false, nil
} }
h.address = append(h.address, address) h.address = append(h.address, address)
@ -169,7 +170,7 @@ func (s *ingManager) start(namespace string) (err error) {
if err != nil { if err != nil {
return return
} }
if err = waitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil { if err = framework.WaitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil {
return return
} }
} }
@ -194,28 +195,28 @@ func (s *ingManager) start(namespace string) (err error) {
func (s *ingManager) test(path string) error { func (s *ingManager) test(path string) error {
url := fmt.Sprintf("%v/hostName", path) url := fmt.Sprintf("%v/hostName", path)
httpClient := &http.Client{} httpClient := &http.Client{}
return wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) { return wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) {
body, err := simpleGET(httpClient, url, "") body, err := simpleGET(httpClient, url, "")
if err != nil { if err != nil {
Logf("%v\n%v\n%v", url, body, err) framework.Logf("%v\n%v\n%v", url, body, err)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
} }
var _ = KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() { var _ = framework.KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() {
// These variables are initialized after framework's beforeEach. // These variables are initialized after framework's beforeEach.
var ns string var ns string
var repoRoot string var repoRoot string
var client *client.Client var client *client.Client
framework := NewDefaultFramework("servicelb") f := framework.NewDefaultFramework("servicelb")
BeforeEach(func() { BeforeEach(func() {
client = framework.Client client = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
repoRoot = testContext.RepoRoot repoRoot = framework.TestContext.RepoRoot
}) })
It("should support simple GET on Ingress ips", func() { It("should support simple GET on Ingress ips", func() {
@ -229,7 +230,7 @@ var _ = KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func()
for _, sName := range s.svcNames { for _, sName := range s.svcNames {
path := t.lookup(sName) path := t.lookup(sName)
Logf("Testing path %v", path) framework.Logf("Testing path %v", path)
Expect(s.test(path)).NotTo(HaveOccurred()) Expect(s.test(path)).NotTo(HaveOccurred())
} }
} }
@ -266,7 +267,7 @@ func simpleGET(c *http.Client, url, host string) (string, error) {
// rcFromManifest reads a .json/yaml file and returns the rc in it. // rcFromManifest reads a .json/yaml file and returns the rc in it.
func rcFromManifest(fileName string) *api.ReplicationController { func rcFromManifest(fileName string) *api.ReplicationController {
var controller api.ReplicationController var controller api.ReplicationController
Logf("Parsing rc from %v", fileName) framework.Logf("Parsing rc from %v", fileName)
data, err := ioutil.ReadFile(fileName) data, err := ioutil.ReadFile(fileName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -280,7 +281,7 @@ func rcFromManifest(fileName string) *api.ReplicationController {
// svcFromManifest reads a .json/yaml file and returns the service in it. // svcFromManifest reads a .json/yaml file and returns the service in it.
func svcFromManifest(fileName string) *api.Service { func svcFromManifest(fileName string) *api.Service {
var svc api.Service var svc api.Service
Logf("Parsing service from %v", fileName) framework.Logf("Parsing service from %v", fileName)
data, err := ioutil.ReadFile(fileName) data, err := ioutil.ReadFile(fileName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())


@ -20,24 +20,26 @@ import (
"fmt" "fmt"
"strings" "strings"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )
var _ = KubeDescribe("SSH", func() { var _ = framework.KubeDescribe("SSH", func() {
f := NewDefaultFramework("ssh") f := framework.NewDefaultFramework("ssh")
BeforeEach(func() { BeforeEach(func() {
// When adding more providers here, also implement their functionality in util.go's getSigner(...). // When adding more providers here, also implement their functionality in util.go's framework.GetSigner(...).
SkipUnlessProviderIs(providersWithSSH...) framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
}) })
It("should SSH to all nodes and run commands", func() { It("should SSH to all nodes and run commands", func() {
// Get all nodes' external IPs. // Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses") By("Getting all nodes' SSH-able IP addresses")
hosts, err := NodeSSHHosts(f.Client) hosts, err := framework.NodeSSHHosts(f.Client)
if err != nil { if err != nil {
Failf("Error getting node hostnames: %v", err) framework.Failf("Error getting node hostnames: %v", err)
} }
testCases := []struct { testCases := []struct {
@ -59,34 +61,34 @@ var _ = KubeDescribe("SSH", func() {
for _, testCase := range testCases { for _, testCase := range testCases {
By(fmt.Sprintf("SSH'ing to all nodes and running %s", testCase.cmd)) By(fmt.Sprintf("SSH'ing to all nodes and running %s", testCase.cmd))
for _, host := range hosts { for _, host := range hosts {
result, err := SSH(testCase.cmd, host, testContext.Provider) result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != testCase.expectedError { if err != testCase.expectedError {
Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
} }
if testCase.checkStdout && stdout != testCase.expectedStdout { if testCase.checkStdout && stdout != testCase.expectedStdout {
Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
} }
if stderr != testCase.expectedStderr { if stderr != testCase.expectedStderr {
Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
} }
if result.Code != testCase.expectedCode { if result.Code != testCase.expectedCode {
Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
} }
// Show stdout, stderr for logging purposes. // Show stdout, stderr for logging purposes.
if len(stdout) > 0 { if len(stdout) > 0 {
Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout)) framework.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout))
} }
if len(stderr) > 0 { if len(stderr) > 0 {
Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr)) framework.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr))
} }
} }
} }
// Quickly test that SSH itself errors correctly. // Quickly test that SSH itself errors correctly.
By("SSH'ing to a nonexistent host") By("SSH'ing to a nonexistent host")
if _, err = SSH(`echo "hello"`, "i.do.not.exist", testContext.Provider); err == nil { if _, err = framework.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
Failf("Expected error trying to SSH to nonexistent host.") framework.Failf("Expected error trying to SSH to nonexistent host.")
} }
}) })
}) })


@ -29,34 +29,35 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
) )
var _ = KubeDescribe("Ubernetes Lite", func() { var _ = framework.KubeDescribe("Ubernetes Lite", func() {
framework := NewDefaultFramework("ubernetes-lite") f := framework.NewDefaultFramework("ubernetes-lite")
var zoneCount int var zoneCount int
var err error var err error
image := "gcr.io/google_containers/serve_hostname:v1.4" image := "gcr.io/google_containers/serve_hostname:v1.4"
BeforeEach(func() { BeforeEach(func() {
if zoneCount <= 0 { if zoneCount <= 0 {
zoneCount, err = getZoneCount(framework.Client) zoneCount, err = getZoneCount(f.Client)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test") framework.SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test")
SkipUnlessProviderIs("gce", "gke", "aws") framework.SkipUnlessProviderIs("gce", "gke", "aws")
// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
}) })
It("should spread the pods of a service across zones", func() { It("should spread the pods of a service across zones", func() {
SpreadServiceOrFail(framework, (2*zoneCount)+1, image) SpreadServiceOrFail(f, (2*zoneCount)+1, image)
}) })
It("should spread the pods of a replication controller across zones", func() { It("should spread the pods of a replication controller across zones", func() {
SpreadRCOrFail(framework, (2*zoneCount)+1, image) SpreadRCOrFail(f, (2*zoneCount)+1, image)
}) })
}) })
// Check that the pods comprising a service get spread evenly across available zones // Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *Framework, replicaCount int, image string) { func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
// First create the service // First create the service
serviceName := "test-service" serviceName := "test-service"
serviceSpec := &api.Service{ serviceSpec := &api.Service{
@ -92,11 +93,11 @@ func SpreadServiceOrFail(f *Framework, replicaCount int, image string) {
}, },
}, },
} }
startPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false) framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
// Wait for all of them to be scheduled // Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones // Now make sure they're spread across zones
@ -180,7 +181,7 @@ func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string)
} }
// Check that the pods comprising a replication controller get spread evenly across available zones // Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *Framework, replicaCount int, image string) { func SpreadRCOrFail(f *framework.Framework, replicaCount int, image string) {
name := "ubelite-spread-rc-" + string(util.NewUUID()) name := "ubelite-spread-rc-" + string(util.NewUUID())
By(fmt.Sprintf("Creating replication controller %s", name)) By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
@ -213,18 +214,18 @@ func SpreadRCOrFail(f *Framework, replicaCount int, image string) {
// Cleanup the replication controller when we are done. // Cleanup the replication controller when we are done.
defer func() { defer func() {
// Resize the replication controller to zero to get rid of pods. // Resize the replication controller to zero to get rid of pods.
if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil { if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
} }
}() }()
// List the pods, making sure we observe all the replicas. // List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicaCount) pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for all of them to be scheduled // Wait for all of them to be scheduled
By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector)) By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = waitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Now make sure they're spread across zones // Now make sure they're spread across zones


@ -23,6 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -36,21 +37,21 @@ const (
expectedSize = "2Gi" expectedSize = "2Gi"
) )
var _ = KubeDescribe("Dynamic provisioning", func() { var _ = framework.KubeDescribe("Dynamic provisioning", func() {
framework := NewDefaultFramework("volume-provisioning") f := framework.NewDefaultFramework("volume-provisioning")
// filled in BeforeEach // filled in BeforeEach
var c *client.Client var c *client.Client
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
ns = framework.Namespace.Name ns = f.Namespace.Name
}) })
KubeDescribe("DynamicProvisioner", func() { framework.KubeDescribe("DynamicProvisioner", func() {
It("should create and delete persistent volumes", func() { It("should create and delete persistent volumes", func() {
SkipUnlessProviderIs("openstack", "gce", "aws", "gke") framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
By("creating a claim with a dynamic provisioning annotation") By("creating a claim with a dynamic provisioning annotation")
claim := createClaim(ns) claim := createClaim(ns)
defer func() { defer func() {
@ -59,7 +60,7 @@ var _ = KubeDescribe("Dynamic provisioning", func() {
claim, err := c.PersistentVolumeClaims(ns).Create(claim) claim, err := c.PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, poll, claimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("checking the claim") By("checking the claim")
@ -115,10 +116,10 @@ var _ = KubeDescribe("Dynamic provisioning", func() {
time.Sleep(time.Minute) time.Sleep(time.Minute)
By("deleting the claim") By("deleting the claim")
expectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name)) framework.ExpectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name))
// Wait for the PV to get deleted too. // Wait for the PV to get deleted too.
expectNoError(waitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 10*time.Minute)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 10*time.Minute))
}) })
}) })
}) })
@ -186,8 +187,8 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
} }
pod, err := c.Pods(ns).Create(pod) pod, err := c.Pods(ns).Create(pod)
defer func() { defer func() {
expectNoError(c.Pods(ns).Delete(pod.Name, nil)) framework.ExpectNoError(c.Pods(ns).Delete(pod.Name, nil))
}() }()
expectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(err, "Failed to create pod: %v", err)
expectNoError(waitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace)) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace))
} }


@ -49,6 +49,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog" "github.com/golang/glog"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -140,13 +141,13 @@ func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod
}, },
} }
_, err := podClient.Create(serverPod) _, err := podClient.Create(serverPod)
expectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err) framework.ExpectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err)
expectNoError(waitForPodRunningInNamespace(client, serverPod.Name, config.namespace)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod.Name, config.namespace))
By("locating the server pod") By("locating the server pod")
pod, err := podClient.Get(serverPod.Name) pod, err := podClient.Get(serverPod.Name)
expectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err) framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
By("sleeping a bit to give the server time to start") By("sleeping a bit to give the server time to start")
time.Sleep(20 * time.Second) time.Sleep(20 * time.Second)
@ -164,16 +165,16 @@ func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
err := podClient.Delete(config.prefix+"-client", nil) err := podClient.Delete(config.prefix+"-client", nil)
if err != nil { if err != nil {
// Log the error before failing test: if the test has already failed, // Log the error before failing test: if the test has already failed,
// expectNoError() won't print anything to logs! // framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err) glog.Warningf("Failed to delete client pod: %v", err)
expectNoError(err, "Failed to delete client pod: %v", err) framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
} }
if config.serverImage != "" { if config.serverImage != "" {
err = podClient.Delete(config.prefix+"-server", nil) err = podClient.Delete(config.prefix+"-server", nil)
if err != nil { if err != nil {
glog.Warningf("Failed to delete server pod: %v", err) glog.Warningf("Failed to delete server pod: %v", err)
expectNoError(err, "Failed to delete server pod: %v", err) framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
} }
} }
} }
@ -234,18 +235,18 @@ func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api
clientPod.Spec.SecurityContext.FSGroup = fsGroup clientPod.Spec.SecurityContext.FSGroup = fsGroup
} }
if _, err := podsNamespacer.Create(clientPod); err != nil { if _, err := podsNamespacer.Create(clientPod); err != nil {
Failf("Failed to create %s pod: %v", clientPod.Name, err) framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
} }
expectNoError(waitForPodRunningInNamespace(client, clientPod.Name, config.namespace)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod.Name, config.namespace))
By("Checking that text file contents are perfect.") By("Checking that text file contents are perfect.")
_, err := lookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute) _, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.") Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
if fsGroup != nil { if fsGroup != nil {
By("Checking fsGroup is correct.") By("Checking fsGroup is correct.")
_, err = lookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute) _, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup)) Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
} }
} }
@ -303,8 +304,8 @@ func injectHtml(client *client.Client, config VolumeTestConfig, volume api.Volum
}() }()
injectPod, err := podClient.Create(injectPod) injectPod, err := podClient.Create(injectPod)
expectNoError(err, "Failed to create injector pod: %v", err) framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
err = waitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Spec.Containers[0].Name, injectPod.Namespace) err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Spec.Containers[0].Name, injectPod.Namespace)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
@ -315,24 +316,24 @@ func deleteCinderVolume(name string) error {
var err error var err error
timeout := time.Second * 120 timeout := time.Second * 120
Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput() output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil { if err == nil {
Logf("Cinder volume %s deleted", name) framework.Logf("Cinder volume %s deleted", name)
return nil return nil
} else { } else {
Logf("Failed to delete volume %s: %v", name, err) framework.Logf("Failed to delete volume %s: %v", name, err)
} }
} }
Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err return err
} }
// These tests need privileged containers, which are disabled by default. Run // These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]" // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = KubeDescribe("Volumes [Feature:Volumes]", func() { var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework := NewDefaultFramework("volume") f := framework.NewDefaultFramework("volume")
// If 'false', the test won't clear its volumes upon completion. Useful for debugging, // If 'false', the test won't clear its volumes upon completion. Useful for debugging,
// note that namespace deletion is handled by delete-namespace flag // note that namespace deletion is handled by delete-namespace flag
@ -342,15 +343,15 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
var namespace *api.Namespace var namespace *api.Namespace
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = f.Client
namespace = framework.Namespace namespace = f.Namespace
}) })
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
// NFS // NFS
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
KubeDescribe("NFS", func() { framework.KubeDescribe("NFS", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -366,7 +367,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("NFS server IP address: %v", serverIP) framework.Logf("NFS server IP address: %v", serverIP)
volume := api.VolumeSource{ volume := api.VolumeSource{
NFS: &api.NFSVolumeSource{ NFS: &api.NFSVolumeSource{
@ -384,7 +385,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
// Gluster // Gluster
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
KubeDescribe("GlusterFS", func() { framework.KubeDescribe("GlusterFS", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -400,7 +401,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("Gluster server IP address: %v", serverIP) framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server // create Endpoints for the server
endpoints := api.Endpoints{ endpoints := api.Endpoints{
@ -438,7 +439,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
if _, err := endClient.Create(&endpoints); err != nil { if _, err := endClient.Create(&endpoints); err != nil {
Failf("Failed to create endpoints for Gluster server: %v", err) framework.Failf("Failed to create endpoints for Gluster server: %v", err)
} }
volume := api.VolumeSource{ volume := api.VolumeSource{
@ -463,7 +464,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
// are installed on all nodes! // are installed on all nodes!
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI" // Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
KubeDescribe("iSCSI", func() { framework.KubeDescribe("iSCSI", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -483,7 +484,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("iSCSI server IP address: %v", serverIP) framework.Logf("iSCSI server IP address: %v", serverIP)
volume := api.VolumeSource{ volume := api.VolumeSource{
ISCSI: &api.ISCSIVolumeSource{ ISCSI: &api.ISCSIVolumeSource{
@ -505,7 +506,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
// Ceph RBD // Ceph RBD
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
KubeDescribe("Ceph RBD", func() { framework.KubeDescribe("Ceph RBD", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -526,7 +527,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("Ceph server IP address: %v", serverIP) framework.Logf("Ceph server IP address: %v", serverIP)
// create secrets for the server // create secrets for the server
secret := api.Secret{ secret := api.Secret{
@ -552,7 +553,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
if _, err := secClient.Create(&secret); err != nil { if _, err := secClient.Create(&secret); err != nil {
Failf("Failed to create secrets for Ceph RBD: %v", err) framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
} }
volume := api.VolumeSource{ volume := api.VolumeSource{
@ -578,7 +579,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
// Ceph // Ceph
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
KubeDescribe("CephFS", func() { framework.KubeDescribe("CephFS", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -594,7 +595,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
}() }()
pod := startVolumeServer(c, config) pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
Logf("Ceph server IP address: %v", serverIP) framework.Logf("Ceph server IP address: %v", serverIP)
By("sleeping a bit to give ceph server time to initialize") By("sleeping a bit to give ceph server time to initialize")
time.Sleep(20 * time.Second) time.Sleep(20 * time.Second)
@ -617,14 +618,14 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() { defer func() {
if clean { if clean {
if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil { if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil {
Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
} }
}() }()
var err error var err error
if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil { if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil {
Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
volume := api.VolumeSource{ volume := api.VolumeSource{
@ -649,7 +650,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
// and that the usual OpenStack authentication env. variables are set // and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
KubeDescribe("Cinder", func() { framework.KubeDescribe("Cinder", func() {
It("should be mountable", func() { It("should be mountable", func() {
config := VolumeTestConfig{ config := VolumeTestConfig{
namespace: namespace.Name, namespace: namespace.Name,
@ -661,7 +662,7 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
By("creating a test Cinder volume") By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output[:]) outputString := string(output[:])
Logf("cinder output:\n%s", outputString) framework.Logf("cinder output:\n%s", outputString)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
@ -687,12 +688,12 @@ var _ = KubeDescribe("Volumes [Feature:Volumes]", func() {
volumeID = fields[3] volumeID = fields[3]
break break
} }
Logf("Volume ID: %s", volumeID) framework.Logf("Volume ID: %s", volumeID)
Expect(volumeID).NotTo(Equal("")) Expect(volumeID).NotTo(Equal(""))
defer func() { defer func() {
if clean { if clean {
Logf("Running volumeTestCleanup") framework.Logf("Running volumeTestCleanup")
volumeTestCleanup(c, config) volumeTestCleanup(c, config)
} }
}() }()


@ -37,7 +37,7 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e" e2e "k8s.io/kubernetes/test/e2e/framework"
) )
var ( var (