Merge pull request #14698 from yujuhong/gce_only

Auto commit by PR queue bot
k8s-merge-robot committed 2015-10-02 02:53:27 -07:00
2 changed files with 72 additions and 45 deletions

View File

@@ -112,8 +112,14 @@ GKE_REQUIRED_SKIP_TESTS=(
     "Shell"
     "Daemon\sset"
     "Deployment"
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
 )
 
+# Tests which cannot be run on AWS.
+AWS_REQUIRED_SKIP_TESTS=(
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
+)
+
 # The following tests are known to be flaky, and are thus run only in their own
 # -flaky- build variants.
 GCE_FLAKY_TESTS=(
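Note: the entries in these arrays are Ginkgo test-name regexes ("\s" stands in for the spaces in test names). A minimal sketch of how such an array can be folded into a single --ginkgo.skip alternation; join_regex is an illustrative stand-in for whatever helper the script actually uses:

#!/usr/bin/env bash
# Join an array of skip patterns into one alternation regex.
join_regex() {
    local IFS='|'
    echo "$*"
}

AWS_REQUIRED_SKIP_TESTS=(
    "experimental\sresource\susage\stracking" # Expect --max-pods=100
)

# Any test whose full name matches the alternation is skipped.
echo "--ginkgo.skip=$(join_regex "${AWS_REQUIRED_SKIP_TESTS[@]:+${AWS_REQUIRED_SKIP_TESTS[@]}}")"
# -> --ginkgo.skip=experimental\sresource\susage\stracking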
@@ -301,6 +307,7 @@ case ${JOB_NAME} in
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
+          ${AWS_REQUIRED_SKIP_TESTS[@]:+${AWS_REQUIRED_SKIP_TESTS[@]}} \
           )"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override AWS defaults.
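The ${ARR[@]:+${ARR[@]}} form used throughout is deliberate: under `set -o nounset`, expanding an empty array as ${ARR[@]} is an "unbound variable" error on bash older than 4.4, while the :+ variant expands to nothing and the argument simply disappears. A self-contained sketch:

#!/usr/bin/env bash
set -o nounset

count_args() { echo "$# args"; }

EMPTY=()
FULL=("Daemon\sset" "Deployment")

count_args ${EMPTY[@]:+${EMPTY[@]}}   # "0 args"; a bare ${EMPTY[@]} would trip nounset on bash < 4.4
count_args ${FULL[@]:+${FULL[@]}}     # "2 args"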
@@ -439,6 +446,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
@@ -455,6 +463,7 @@ case ${JOB_NAME} in
           ${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
@@ -560,6 +569,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;

View File

@@ -50,39 +50,8 @@ func logPodsOnNodes(c *client.Client, nodeNames []string) {
 	}
 }
 
-var _ = Describe("Kubelet", func() {
-	var numNodes int
-	var nodeNames sets.String
-	framework := NewFramework("kubelet-perf")
-	var resourceMonitor *resourceMonitor
-
-	BeforeEach(func() {
-		nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
-		expectNoError(err)
-		numNodes = len(nodes.Items)
-		nodeNames = sets.NewString()
-		for _, node := range nodes.Items {
-			nodeNames.Insert(node.Name)
-		}
-		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
-		resourceMonitor.Start()
-	})
-
-	AfterEach(func() {
-		resourceMonitor.Stop()
-	})
-
-	Describe("resource usage tracking", func() {
-		density := []int{0, 50}
-		for i := range density {
-			podsPerNode := density[i]
-			name := fmt.Sprintf(
-				"over %v with %d pods per node.", monitoringTime, podsPerNode)
-			It(name, func() {
-				// Skip this test for GKE.
-				// TODO: Re-activate this for GKE
-				SkipIfProviderIs("gke")
+func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
+	numNodes := nodeNames.Len()
 	totalPods := podsPerNode * numNodes
 	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))
@@ -110,6 +79,13 @@ var _ = Describe("Kubelet", func() {
 	for time.Now().Before(deadline) {
-		Logf("Still running...%v left", deadline.Sub(time.Now()))
-		time.Sleep(reportingPeriod)
+		timeLeft := deadline.Sub(time.Now())
+		Logf("Still running...%v left", timeLeft)
+		if timeLeft < reportingPeriod {
+			time.Sleep(timeLeft)
+		} else {
+			time.Sleep(reportingPeriod)
+		}
 		logPodsOnNodes(framework.Client, nodeNames.List())
 	}
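This hunk fixes an overshoot: the old loop always slept a full reportingPeriod, so monitoring could run up to one period past the deadline; the new code caps the final sleep at the time remaining. A runnable distillation of the pattern (names are illustrative, not from the PR):

package main

import (
	"fmt"
	"time"
)

// pollUntil calls report on each iteration, sleeping one period between
// reports but never sleeping past the deadline.
func pollUntil(deadline time.Time, period time.Duration, report func(time.Duration)) {
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		report(timeLeft)
		if timeLeft < period {
			time.Sleep(timeLeft) // final, shortened sleep
		} else {
			time.Sleep(period)
		}
	}
}

func main() {
	deadline := time.Now().Add(250 * time.Millisecond)
	pollUntil(deadline, 100*time.Millisecond, func(left time.Duration) {
		fmt.Printf("still running... %v left\n", left)
	})
}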
@@ -120,6 +96,47 @@ var _ = Describe("Kubelet", func() {
 	By("Deleting the RC")
 	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
-			})
-		}
-	})
-})
+}
+
+var _ = Describe("Kubelet", func() {
+	var nodeNames sets.String
+	framework := NewFramework("kubelet-perf")
+	var resourceMonitor *resourceMonitor
+
+	BeforeEach(func() {
+		nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
+		expectNoError(err)
+		nodeNames = sets.NewString()
+		for _, node := range nodes.Items {
+			nodeNames.Insert(node.Name)
+		}
+		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
+		resourceMonitor.Start()
+	})
+
+	AfterEach(func() {
+		resourceMonitor.Stop()
+	})
+
+	Describe("regular resource usage tracking", func() {
+		density := []int{0, 35}
+		for i := range density {
+			podsPerNode := density[i]
+			name := fmt.Sprintf(
+				"over %v with %d pods per node.", monitoringTime, podsPerNode)
+			It(name, func() {
+				runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
+			})
+		}
+	})
+
+	Describe("experimental resource usage tracking", func() {
+		density := []int{50}
+		for i := range density {
+			podsPerNode := density[i]
+			name := fmt.Sprintf(
+				"over %v with %d pods per node.", monitoringTime, podsPerNode)
+			It(name, func() {
+				runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
+			})
+		}
+	})
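Two details of the refactor are worth noting: the shared body now lives in runResourceTrackingTest, so the two suites differ only in name and density table, and the podsPerNode := density[i] copy inside the loop matters because Ginkgo registers the It closures now but runs them later. A standalone sketch of that capture pattern, with plain functions standing in for Ginkgo:

package main

import "fmt"

func main() {
	density := []int{0, 35}
	var registered []func()

	for i := range density {
		podsPerNode := density[i] // per-iteration copy; closing over i directly would make every closure see its final value (pre-Go 1.22 semantics)
		registered = append(registered, func() {
			fmt.Printf("tracking with %d pods per node\n", podsPerNode)
		})
	}

	// Deferred execution, as Ginkgo does with registered Its:
	for _, run := range registered {
		run() // prints 0, then 35
	}
}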