fix(e2e_node): density cleanup pods
Author: zhifei92
@@ -354,10 +354,13 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
+	ginkgo.DeferCleanup(rc.Stop)
 
 	ginkgo.By("Creating a batch of pods")
 	// It returns a map['pod name']'creation time' containing the creation timestamps
 	createTimes := createBatchPodWithRateControl(ctx, f, pods, testArg.interval)
+	ginkgo.DeferCleanup(deletePodsSync, f, pods)
+	ginkgo.DeferCleanup(deletePodsSync, f, []*v1.Pod{getCadvisorPod()})
 
 	ginkgo.By("Waiting for all Pods to be observed by the watch...")
 
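The new registrations rely on two documented behaviors of Ginkgo v2's ginkgo.DeferCleanup: trailing arguments are forwarded to the callback when it eventually runs, and a callback whose first parameter is a context.Context has one injected automatically, which is why the explicit ctx argument from the replaced inline calls can be dropped. A minimal, self-contained sketch of that shape (deletePods and the pod names are hypothetical stand-ins, not the test's real helpers):

package density_demo_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestDensityDemo(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "DeferCleanup demo")
}

// deletePods stands in for deletePodsSync: its first parameter is a
// context.Context, so DeferCleanup injects one automatically; the rest
// arrive from the arguments given at registration time.
func deletePods(ctx context.Context, names []string) {
	fmt.Printf("deleting %v\n", names)
}

var _ = ginkgo.It("registers cleanup instead of calling it inline", func() {
	pods := []string{"pod-0", "pod-1"}
	// Same shape as ginkgo.DeferCleanup(deletePodsSync, f, pods) above:
	// the callback plus its trailing args, minus the explicit ctx.
	ginkgo.DeferCleanup(deletePods, pods)
	// ... measurement phase; the cleanup fires once the spec ends ...
})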
@@ -400,9 +403,6 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour
 	sort.Sort(e2emetrics.LatencySlice(e2eLags))
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
 
-	rc.Stop()
-	deletePodsSync(ctx, f, pods)
-
 	// Log time series data.
 	if isLogTimeSeries {
 		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
@@ -410,8 +410,6 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour
 	// Log throughput data.
 	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)
 
-	deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()})
-
 	return batchLag, e2eLags
 }
 
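The calls removed here ran only on the success path: an assertion failure anywhere between pod creation and this point skipped both rc.Stop() and the pod deletion, leaking the batch pods into later specs. Cleanups registered with DeferCleanup run whether the spec passes or fails. Extending the sketch above (the failing expectation is deliberate):

var _ = ginkgo.It("still cleans up when the spec fails", func() {
	ginkgo.DeferCleanup(func() {
		fmt.Println("stop collector, delete pods") // runs despite the failure below
	})
	gomega.Expect(1).To(gomega.Equal(2)) // spec fails here...
	fmt.Println("unreached")             // ...so an inline cleanup down here would be skipped
})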
@@ -428,22 +426,21 @@ func runDensitySeqTest(ctx context.Context, f *framework.Framework, rc *Resource
 
 	// CreatBatch is synchronized, all pods are running when it returns
 	e2epod.NewPodClient(f).CreateBatch(ctx, bgPods)
+	ginkgo.DeferCleanup(deletePodsSync, f, bgPods)
+	ginkgo.DeferCleanup(deletePodsSync, f, []*v1.Pod{getCadvisorPod()})
 
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
+	ginkgo.DeferCleanup(rc.Stop)
 
 	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(ctx, f, testPods, podType)
-	rc.Stop()
-	deletePodsSync(ctx, f, append(bgPods, testPods...))
+	ginkgo.DeferCleanup(deletePodsSync, f, testPods)
 
 	// Log throughput data.
 	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
 
-	deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()})
-
 	return batchlag, e2eLags
 }
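One behavioral detail of the new arrangement: DeferCleanup callbacks execute in last-in, first-out order once the spec ends. In runDensitySeqTest that means the testPods deletion registered last runs first, then rc.Stop, and only then are the cadvisor and background pods removed, whereas the old code stopped the collector before deleting any pods. A last addition to the demo file makes the ordering visible:

var _ = ginkgo.It("runs cleanups in LIFO order", func() {
	ginkgo.DeferCleanup(func() { fmt.Println("runs 3rd: delete background pods") }) // registered 1st
	ginkgo.DeferCleanup(func() { fmt.Println("runs 2nd: rc.Stop") })                // registered 2nd
	ginkgo.DeferCleanup(func() { fmt.Println("runs 1st: delete test pods") })       // registered 3rd
})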