Add performance test phase timing export.
@@ -43,6 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/timer"
 	testutils "k8s.io/kubernetes/test/utils"

 	. "github.com/onsi/ginkgo"
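The new import is the only piece of the timer package visible in this diff. From the call sites below (NewTestPhaseTimer, StartPhase, Phase.End), the package must provide roughly the following shape; this is a reconstruction for orientation, not the actual implementation — field names and internals are assumptions:

package timer

import (
	"sync"
	"time"
)

// Phase is one named, numbered span of a test run.
type Phase struct {
	sequenceNumber int
	label          string
	start          time.Time
	duration       time.Duration
	once           sync.Once // lets End be called both explicitly and via defer
}

// End records the phase duration. The sync.Once makes a second call a no-op,
// which the test below relies on (defer + explicit End on the same phase).
func (p *Phase) End() {
	p.once.Do(func() { p.duration = time.Since(p.start) })
}

// TestPhaseTimer accumulates phases for export at teardown.
type TestPhaseTimer struct {
	mu     sync.Mutex
	phases []*Phase
}

func NewTestPhaseTimer() *TestPhaseTimer { return &TestPhaseTimer{} }

// StartPhase begins timing immediately; sequenceNumber orders phases in the
// exported summary (the test below uses 1xx for setup, 200-500 for the load
// itself, and 800 for cleanup).
func (t *TestPhaseTimer) StartPhase(sequenceNumber int, label string) *Phase {
	p := &Phase{sequenceNumber: sequenceNumber, label: label, start: time.Now()}
	t.mu.Lock()
	defer t.mu.Unlock()
	t.phases = append(t.phases, p)
	return p
}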
@@ -88,6 +89,7 @@ var _ = SIGDescribe("Load capacity", func() {
 	var configMapConfigs []*testutils.ConfigMapConfig

 	testCaseBaseName := "load"
+	var testPhaseDurations *timer.TestPhaseTimer

 	// Gathers metrics before teardown
 	// TODO add flag that allows to skip cleanup on failure
@@ -96,8 +98,9 @@ var _ = SIGDescribe("Load capacity", func() {
 		highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset, nodeCount)
 		framework.ExpectNoError(err)
 		if err == nil {
-			summaries := make([]framework.TestDataSummary, 0, 1)
+			summaries := make([]framework.TestDataSummary, 0, 2)
 			summaries = append(summaries, metrics)
+			summaries = append(summaries, testPhaseDurations)
 			framework.PrintSummaries(summaries, testCaseBaseName)
 			Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 		}
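Growing the summaries slice from capacity 1 to 2 and appending testPhaseDurations implies that TestPhaseTimer satisfies framework.TestDataSummary, the interface framework.PrintSummaries consumes. The interface's method set is not shown in this diff; the sketch below (continuing the timer sketch above, with "encoding/json", "fmt", "sort", and "strings" added to its imports) shows one plausible shape — the method names are assumptions:

// Assumed summary interface and a possible implementation for the sketch.
type TestDataSummary interface {
	SummaryKind() string
	PrintHumanReadable() string
	PrintJSON() string
}

func (t *TestPhaseTimer) SummaryKind() string { return "TestPhaseTimer" }

func (t *TestPhaseTimer) PrintHumanReadable() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	sorted := append([]*Phase(nil), t.phases...)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].sequenceNumber < sorted[j].sequenceNumber
	})
	var b strings.Builder
	for _, p := range sorted {
		fmt.Fprintf(&b, "%03d %-25s %v\n", p.sequenceNumber, p.label, p.duration)
	}
	return b.String()
}

func (t *TestPhaseTimer) PrintJSON() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	out := make(map[string]float64, len(t.phases))
	for _, p := range t.phases {
		out[fmt.Sprintf("%03d-%s", p.sequenceNumber, p.label)] = p.duration.Seconds()
	}
	data, _ := json.Marshal(out)
	return string(data)
}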
@@ -123,6 +126,7 @@ var _ = SIGDescribe("Load capacity", func() {
 	f.NamespaceDeletionTimeout = time.Hour

 	BeforeEach(func() {
+		testPhaseDurations = timer.NewTestPhaseTimer()
 		clientset = f.ClientSet

 		ns = f.Namespace.Name
@@ -187,12 +191,14 @@ var _ = SIGDescribe("Load capacity", func() {
 		It(name, func() {
 			// Create a number of namespaces.
 			namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
-			namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode))
+			namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
 			framework.ExpectNoError(err)

 			totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
 			configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)

+			serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
+			defer serviceCreationPhase.End()
 			if itArg.services {
 				framework.Logf("Creating services")
 				services := generateServicesForConfigs(configs)
@@ -204,6 +210,8 @@ var _ = SIGDescribe("Load capacity", func() {
 				workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
 				framework.Logf("%v Services created.", len(services))
 				defer func(services []*v1.Service) {
+					serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
+					defer serviceCleanupPhase.End()
 					framework.Logf("Starting to delete services...")
 					deleteService := func(i int) {
 						defer GinkgoRecover()
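Note that the services-deletion phase (sequence 800) is started inside the deferred closure, not at the point where the defer is registered. The distinction matters in Go: arguments to a deferred call are evaluated immediately, so starting the phase outside the closure would charge the entire test's runtime to "services deletion". A minimal self-contained illustration of the timing difference (names and sleeps are stand-ins):

package main

import (
	"fmt"
	"time"
)

func main() {
	testStart := time.Now()

	// What the diff does: the cleanup clock starts only when the
	// deferred closure actually runs, at the end of the test.
	defer func() {
		cleanupStart := time.Now()
		time.Sleep(5 * time.Millisecond) // stand-in for deleting services
		fmt.Println("cleanup phase:", time.Since(cleanupStart).Round(time.Millisecond))
		fmt.Println("whole test:  ", time.Since(testStart).Round(time.Millisecond))
	}()

	time.Sleep(20 * time.Millisecond) // stand-in for the rest of the test body
}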
@@ -216,17 +224,26 @@ var _ = SIGDescribe("Load capacity", func() {
 			} else {
 				framework.Logf("Skipping service creation")
 			}
+			serviceCreationPhase.End()
 			// Create all secrets.
+			secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation")
+			defer secretsCreationPhase.End()
 			for i := range secretConfigs {
 				secretConfigs[i].Run()
 				defer secretConfigs[i].Stop()
 			}
+			secretsCreationPhase.End()
 			// Create all configmaps.
+			configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation")
+			defer configMapsCreationPhase.End()
 			for i := range configMapConfigs {
 				configMapConfigs[i].Run()
 				defer configMapConfigs[i].Stop()
 			}
+			configMapsCreationPhase.End()
 			// StartDaemon if needed
+			daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation")
+			defer daemonSetCreationPhase.End()
 			for i := 0; i < itArg.daemonsPerNode; i++ {
 				daemonName := fmt.Sprintf("load-daemon-%v", i)
 				daemonConfig := &testutils.DaemonConfig{
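Each setup phase in this hunk is closed twice: a defer as a safety net (a failed assertion panics through Ginkgo, so the deferred End still fires) and an explicit End as soon as the step completes, so the next phase's time is not folded into this one. That only works if Phase.End is idempotent, as in the sync.Once sketch earlier. A compact demonstration of why both calls coexist:

package main

import (
	"fmt"
	"sync"
	"time"
)

type phase struct {
	start    time.Time
	duration time.Duration
	once     sync.Once
}

func (p *phase) End() { p.once.Do(func() { p.duration = time.Since(p.start) }) }

func runStep() *phase {
	p := &phase{start: time.Now()}
	defer p.End() // safety net: fires even if the step panics below

	time.Sleep(10 * time.Millisecond) // stand-in for creating secrets
	p.End()                           // stop the clock the moment the step is done

	time.Sleep(10 * time.Millisecond) // later work; not attributed to this phase
	return p
}

func main() {
	fmt.Println(runStep().duration.Round(time.Millisecond)) // ~10ms, not ~20ms
}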
@@ -246,6 +263,7 @@ var _ = SIGDescribe("Load capacity", func() {
 				))
 				}(daemonConfig)
 			}
+			daemonSetCreationPhase.End()

 			// Simulate lifetime of RC:
 			// * create with initial size
@@ -264,7 +282,8 @@ var _ = SIGDescribe("Load capacity", func() {
 			// We may want to revisit it in the future.
 			framework.Logf("Starting to create %v objects...", itArg.kind)
 			creatingTime := time.Duration(totalPods/throughput) * time.Second
-			createAllResources(configs, creatingTime)
+
+			createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
 			By("============================================================================")

 			// We would like to spread scaling replication controllers over time
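The pacing here is throughput-driven: creation is spread over totalPods/throughput seconds, and since both operands are integers the division truncates to a whole number of seconds before the conversion to a Duration. A worked example with illustrative numbers (throughput is whatever the suite configures; 20 pods/s below is only an assumption):

package main

import (
	"fmt"
	"time"
)

func main() {
	const throughput = 20 // pods per second; illustrative value
	totalPods := 3000

	creatingTime := time.Duration(totalPods/throughput) * time.Second
	// Each scaling pass touches at most totalPods/3 pods, hence the /3 below.
	scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second

	fmt.Println(creatingTime, scalingTime) // 2m30s 50s
}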
@@ -273,11 +292,11 @@ var _ = SIGDescribe("Load capacity", func() {
 			// The expected number of created/deleted pods is less than totalPods/3.
 			scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
 			framework.Logf("Starting to scale %v objects first time...", itArg.kind)
-			scaleAllResources(configs, scalingTime)
+			scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
 			By("============================================================================")

 			framework.Logf("Starting to scale %v objects second time...", itArg.kind)
-			scaleAllResources(configs, scalingTime)
+			scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
 			By("============================================================================")

 			// Cleanup all created replication controllers.
@@ -285,7 +304,7 @@ var _ = SIGDescribe("Load capacity", func() {
 			// We may want to revisit it in the future.
 			deletingTime := time.Duration(totalPods/throughput) * time.Second
 			framework.Logf("Starting to delete %v objects...", itArg.kind)
-			deleteAllResources(configs, deletingTime)
+			deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
 		})
 	}
 })
@@ -534,7 +553,8 @@ func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
 	return wait.ExponentialBackoff(backoff, fn)
 }

-func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) {
+func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) {
+	defer testPhase.End()
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
 	for _, config := range configs {
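The signature change establishes a small ownership convention: the caller starts a phase in the argument list, and the helper takes responsibility for ending it, first thing, via defer. Call sites stay one-liners and no phase can leak if the helper exits early. A minimal self-contained sketch of the shape (types and names hypothetical, echoing the earlier timer sketch):

package main

import (
	"fmt"
	"time"
)

type phase struct {
	label string
	start time.Time
}

func (p *phase) End() {
	fmt.Println(p.label, "took", time.Since(p.start).Round(time.Millisecond))
}

func startPhase(label string) *phase { return &phase{label: label, start: time.Now()} }

// The helper owns the phase it is handed and ends it on every exit path.
func createAll(items []string, testPhase *phase) {
	defer testPhase.End()
	for range items {
		time.Sleep(2 * time.Millisecond) // stand-in for config.Run()
	}
}

func main() {
	// Mirrors createAllResources(configs, creatingTime, timer.StartPhase(200, ...)).
	createAll([]string{"rc-1", "rc-2", "rc-3"}, startPhase("load pods creation"))
}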
@@ -551,7 +571,8 @@ func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) {
 	framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
 }

-func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) {
+func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) {
+	defer testPhase.End()
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
 	for _, config := range configs {
@@ -592,7 +613,8 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) {
 	framework.ExpectNoError(err)
 }

-func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) {
+func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) {
+	defer testPhase.End()
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
 	for _, config := range configs {
@@ -617,7 +639,8 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
 	}
 }

-func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) {
+func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
+	defer testPhase.End()
 	namespaces := []*v1.Namespace{}
 	for i := 1; i <= namespaceCount; i++ {
 		namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
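One subtlety makes the call-site pattern above correct: Go evaluates arguments before the call, so testPhaseDurations.StartPhase(110, "namespace creation") starts the clock immediately before CreateNamespaces begins, and the deferred End inside the helper stops it when namespace creation finishes. A small self-contained check of that evaluation order (names hypothetical):

package main

import (
	"fmt"
	"time"
)

type phase struct{ start time.Time }

func startPhase() *phase {
	fmt.Println("phase started")
	return &phase{start: time.Now()}
}

func (p *phase) End() {
	fmt.Println("phase ended after", time.Since(p.start).Round(time.Millisecond))
}

func createNamespaces(count int, p *phase) {
	defer p.End()
	time.Sleep(time.Duration(count) * 2 * time.Millisecond) // stand-in for API calls
}

func main() {
	// "phase started" prints before createNamespaces runs: arguments are
	// evaluated first, so the phase covers the helper's full duration.
	createNamespaces(10, startPhase())
}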