Add the ability to delete pods at a specified frequency in scheduler_perf tests
@@ -19,6 +19,7 @@ package benchmark
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"flag"
 	"fmt"
 	"io"
@@ -37,6 +38,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -464,6 +466,9 @@ type createPodsOp struct {
 	// Optional
 	PersistentVolumeTemplatePath      *string
 	PersistentVolumeClaimTemplatePath *string
+	// Number of pods to be deleted per second after they were scheduled. If set to 0, pods are not deleted.
+	// Optional
+	DeletePodsPerSecond int
 }
 
 func (cpo *createPodsOp) isValid(allowParameterization bool) error {
@@ -479,6 +484,9 @@ func (cpo *createPodsOp) isValid(allowParameterization bool) error {
 		// use-cases right now.
 		return fmt.Errorf("collectMetrics and skipWaitToCompletion cannot be true at the same time")
 	}
+	if cpo.DeletePodsPerSecond < 0 {
+		return fmt.Errorf("invalid DeletePodsPerSecond=%d; should be non-negative", cpo.DeletePodsPerSecond)
+	}
 	return nil
 }
 
@@ -1030,6 +1038,34 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 				mu.Unlock()
 			}
 
+			if concreteOp.DeletePodsPerSecond > 0 {
+				pods, err := podInformer.Lister().Pods(namespace).List(labels.Everything())
+				if err != nil {
+					tCtx.Fatalf("op %d: error in listing scheduled pods in the namespace: %v", opIndex, err)
+				}
+
+				ticker := time.NewTicker(time.Second / time.Duration(concreteOp.DeletePodsPerSecond))
+				defer ticker.Stop()
+
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+					for i := 0; i < len(pods); i++ {
+						select {
+						case <-ticker.C:
+							if err := tCtx.Client().CoreV1().Pods(namespace).Delete(tCtx, pods[i].Name, metav1.DeleteOptions{}); err != nil {
+								if errors.Is(err, context.Canceled) {
+									return
+								}
+								tCtx.Errorf("op %d: unable to delete pod %v: %v", opIndex, pods[i].Name, err)
+							}
+						case <-tCtx.Done():
+							return
+						}
+					}
+				}()
+			}
+
 			if !concreteOp.SkipWaitToCompletion {
 				// SkipWaitToCompletion=false indicates this step has waited for the Pods to be scheduled.
 				// So we reset the metrics in global registry; otherwise metrics gathered in this step
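For context: the new DeletePodsPerSecond field throttles deletion of already-scheduled pods with a time.Ticker-driven loop that also honors context cancellation, and the deletion loop runs in a goroutine tracked by the workload's WaitGroup. Below is a minimal, self-contained sketch of the same throttling pattern against client-go's fake clientset; the helper name deletePodsAtRate, the namespace, and the pod names are illustrative only and not part of this commit. (In a scheduler_perf workload config the field would presumably be set on a createPods op via a key such as deletePodsPerSecond, but that mapping is not shown in this diff.)

// sketch.go: illustrative only, not part of the commit above.
package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deletePodsAtRate deletes the given pods at roughly perSecond deletions per
// second, stopping early if ctx is cancelled. perSecond must be > 0.
func deletePodsAtRate(ctx context.Context, client kubernetes.Interface, namespace string, pods []v1.Pod, perSecond int) {
	ticker := time.NewTicker(time.Second / time.Duration(perSecond))
	defer ticker.Stop()
	for i := range pods {
		select {
		case <-ticker.C:
			// Delete one pod per tick, matching the diff's one-deletion-per-tick pacing.
			if err := client.CoreV1().Pods(namespace).Delete(ctx, pods[i].Name, metav1.DeleteOptions{}); err != nil {
				fmt.Printf("unable to delete pod %v: %v\n", pods[i].Name, err)
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	// Two fake pods in a hypothetical "perf-test" namespace.
	pods := []v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "pod-0", Namespace: "perf-test"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "perf-test"}},
	}
	client := fake.NewSimpleClientset(&pods[0], &pods[1])
	deletePodsAtRate(context.Background(), client, "perf-test", pods, 10)
}

With a rate of 10 per second, the two fake pods above are deleted roughly 100 ms apart. The committed code differs mainly in that it lists the pods through the shared pod informer, runs the loop asynchronously under the workload's WaitGroup, and reports failures via tCtx.Errorf instead of printing them.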