Merge pull request #14698 from yujuhong/gce_only
Auto commit by PR queue bot
hack/jenkins/e2e.sh:

@@ -112,6 +112,12 @@ GKE_REQUIRED_SKIP_TESTS=(
     "Shell"
     "Daemon\sset"
     "Deployment"
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
+)
+
+# Tests which cannot be run on AWS.
+AWS_REQUIRED_SKIP_TESTS=(
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
 )
 
 # The following tests are known to be flaky, and are thus run only in their own
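The entries in these skip lists are regular expressions matched against Ginkgo test descriptions, hence "Daemon\sset", where \s stands in for the space in "Daemon set". The `)"}` closers in the later hunks suggest the script ORs the entries into a single --ginkgo.skip value. Below is a minimal sketch of that idea; join_skip_regex is a hypothetical stand-in for the script's real helper, not code from this commit:

join_skip_regex() {
  # Join all arguments with "|" to form one alternation pattern.
  local IFS="|"
  echo "$*"
}

AWS_REQUIRED_SKIP_TESTS=(
    "experimental\sresource\susage\stracking" # Expect --max-pods=100
)

GINKGO_TEST_ARGS="--ginkgo.skip=$(join_skip_regex "${AWS_REQUIRED_SKIP_TESTS[@]}")"
echo "${GINKGO_TEST_ARGS}"
# --ginkgo.skip=experimental\sresource\susage\stracking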
@@ -301,6 +307,7 @@ case ${JOB_NAME} in
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
+          ${AWS_REQUIRED_SKIP_TESTS[@]:+${AWS_REQUIRED_SKIP_TESTS[@]}} \
           )"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override AWS defaults.
@@ -439,6 +446,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
 
@@ -455,6 +463,7 @@ case ${JOB_NAME} in
           ${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
 
@@ -560,6 +569,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
 
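Every `${ARR[@]:+${ARR[@]}}` occurrence in the hunks above is the usual bash guard for expanding a possibly-empty array when the script runs under `set -u`. A self-contained illustration (the variable names are invented for the demo):

#!/usr/bin/env bash
set -u  # unset variables are fatal, as CI scripts often enforce

EMPTY=()
SKIPS=("Shell" "Daemon\sset")

# ${arr[@]:+${arr[@]}} expands to the array's elements when it is
# non-empty, and to nothing at all otherwise, so an empty skip list
# never trips the unset-variable check (a real hazard in bash < 4.4,
# where expanding an empty array under set -u aborts the script).
echo ${SKIPS[@]:+${SKIPS[@]}}   # prints: Shell Daemon\sset
echo ${EMPTY[@]:+${EMPTY[@]}}   # prints an empty line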
test/e2e/kubelet_perf.go:

@@ -50,39 +50,8 @@ func logPodsOnNodes(c *client.Client, nodeNames []string) {
     }
 }
 
-var _ = Describe("Kubelet", func() {
-    var numNodes int
-    var nodeNames sets.String
-    framework := NewFramework("kubelet-perf")
-    var resourceMonitor *resourceMonitor
-
-    BeforeEach(func() {
-        nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
-        expectNoError(err)
-        numNodes = len(nodes.Items)
-        nodeNames = sets.NewString()
-        for _, node := range nodes.Items {
-            nodeNames.Insert(node.Name)
-        }
-        resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
-        resourceMonitor.Start()
-    })
-
-    AfterEach(func() {
-        resourceMonitor.Stop()
-    })
-
-    Describe("resource usage tracking", func() {
-        density := []int{0, 50}
-        for i := range density {
-            podsPerNode := density[i]
-            name := fmt.Sprintf(
-                "over %v with %d pods per node.", monitoringTime, podsPerNode)
-            It(name, func() {
-                // Skip this test for GKE.
-                // TODO: Re-activate this for GKE
-                SkipIfProviderIs("gke")
-
+func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
+    numNodes := nodeNames.Len()
     totalPods := podsPerNode * numNodes
     By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
     rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))
@@ -110,6 +79,13 @@ var _ = Describe("Kubelet", func() {
     for time.Now().Before(deadline) {
         Logf("Still running...%v left", deadline.Sub(time.Now()))
         time.Sleep(reportingPeriod)
+        timeLeft := deadline.Sub(time.Now())
+        Logf("Still running...%v left", timeLeft)
+        if timeLeft < reportingPeriod {
+            time.Sleep(timeLeft)
+        } else {
+            time.Sleep(reportingPeriod)
+        }
         logPodsOnNodes(framework.Client, nodeNames.List())
     }
 
@@ -120,6 +96,47 @@ var _ = Describe("Kubelet", func() {
 
     By("Deleting the RC")
     DeleteRC(framework.Client, framework.Namespace.Name, rcName)
+}
+
+var _ = Describe("Kubelet", func() {
+    var nodeNames sets.String
+    framework := NewFramework("kubelet-perf")
+    var resourceMonitor *resourceMonitor
+
+    BeforeEach(func() {
+        nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
+        expectNoError(err)
+        nodeNames = sets.NewString()
+        for _, node := range nodes.Items {
+            nodeNames.Insert(node.Name)
+        }
+        resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
+        resourceMonitor.Start()
+    })
+
+    AfterEach(func() {
+        resourceMonitor.Stop()
+    })
+
+    Describe("regular resource usage tracking", func() {
+        density := []int{0, 35}
+        for i := range density {
+            podsPerNode := density[i]
+            name := fmt.Sprintf(
+                "over %v with %d pods per node.", monitoringTime, podsPerNode)
+            It(name, func() {
+                runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
+            })
+        }
+    })
+    Describe("experimental resource usage tracking", func() {
+        density := []int{50}
+        for i := range density {
+            podsPerNode := density[i]
+            name := fmt.Sprintf(
+                "over %v with %d pods per node.", monitoringTime, podsPerNode)
+            It(name, func() {
+                runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
             })
         }
     })
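Net effect: the resource usage tracking test is split into a "regular" flavor (0 and 35 pods per node) that runs everywhere and an "experimental" flavor (50 pods per node) that expects --max-pods=100, with the shared body extracted into runResourceTrackingTest. The hard-coded SkipIfProviderIs("gke") is dropped in favor of the Jenkins skip lists above, which exclude "experimental\sresource\susage\stracking" on GKE and AWS.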