Logs node e2e perf data to standalone json files
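The hunks below replace framework.PrintPerfData (which only writes perf data into the test log) with a logPerfData helper that emits each latency and throughput dataset to its own JSON file, keyed by the test description now passed through getTestNodeInfo. The helper itself is not shown in these hunks; as a rough sketch of the idea only, with assumed names and layout (writePerfFile, perfEntry, the output directory, the file-name pattern, and the sample values are illustrative, not taken from this commit):

// Sketch only: illustrates logging perf data to a standalone JSON file.
// The real logPerfData added by this commit may differ; the struct fields,
// output directory, and file-name pattern below are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// perfEntry stands in for the framework's perf-data type plus the per-test
// labels carried in testInfo (node name, test description, and so on).
type perfEntry struct {
	Version   string            `json:"version"`
	DataItems []interface{}     `json:"dataItems"`
	Labels    map[string]string `json:"labels,omitempty"`
}

// writePerfFile marshals one dataset and writes it to
// <dir>/<kind>_<testName>.json, e.g. latency_batch_10.json.
func writePerfFile(dir, kind, testName string, data perfEntry) error {
	b, err := json.MarshalIndent(data, "", "  ")
	if err != nil {
		return fmt.Errorf("marshal perf data: %v", err)
	}
	path := filepath.Join(dir, fmt.Sprintf("%s_%s.json", kind, testName))
	return os.WriteFile(path, b, 0644)
}

func main() {
	// Hypothetical usage: one latency dataset for a batch-creation test.
	data := perfEntry{
		Version:   "v1",
		DataItems: []interface{}{map[string]float64{"Perc50": 1.2, "Perc90": 2.5, "Perc99": 3.8}},
		Labels:    map[string]string{"node": "example-node", "test": "create 10 pods with 0s interval"},
	}
	if err := writePerfFile(os.TempDir(), "latency", "batch_10", data); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}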
@@ -94,10 +94,10 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 
 		for _, testArg := range dTests {
 			itArg := testArg
-			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval",
-				itArg.podsNr, itArg.interval), func() {
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
+			It(desc, func() {
 				itArg.createMethod = "batch"
-				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
 
 				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
 
@@ -152,10 +152,10 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 
 		for _, testArg := range dTests {
 			itArg := testArg
-			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]",
-				itArg.podsNr, itArg.interval), func() {
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
+			It(desc, func() {
 				itArg.createMethod = "batch"
-				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
 
 				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
 
@@ -189,10 +189,10 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 
 		for _, testArg := range dTests {
 			itArg := testArg
-			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]",
-				itArg.podsNr, itArg.interval, itArg.APIQPSLimit), func() {
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
+			It(desc, func() {
 				itArg.createMethod = "batch"
-				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
 				// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
 				// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
 				// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
@@ -232,10 +232,10 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 
 		for _, testArg := range dTests {
 			itArg := testArg
-			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods",
-				itArg.podsNr, itArg.bgPodsNr), func() {
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
+			It(desc, func() {
 				itArg.createMethod = "sequence"
-				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
 				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
 
 				By("Verifying latency")
@@ -265,10 +265,10 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 
 		for _, testArg := range dTests {
 			itArg := testArg
-			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]",
-				itArg.podsNr, itArg.bgPodsNr), func() {
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]", itArg.podsNr, itArg.bgPodsNr)
+			It(desc, func() {
 				itArg.createMethod = "sequence"
-				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
 				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
 
 				By("Verifying latency")
@@ -551,7 +551,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 	podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}
 
 	// log latency perf data
-	framework.PrintPerfData(getLatencyPerfData(podCreateLatency.Latency, testInfo))
+	logPerfData(getLatencyPerfData(podCreateLatency.Latency, testInfo), "latency")
 
 	if isVerify {
 		// check whether e2e pod startup time is acceptable.
@@ -567,7 +567,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 
 // logThroughput calculates and logs pod creation throughput.
 func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) {
-	framework.PrintPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo))
+	logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
 }
 
 // increaseKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.