Remove Configurator interface
The Configurator has been used as a holder for listers that tests need, which is not its purpose. By making the tests obtain listers from more appropriate places, such as informers, there is no need for various accessors on the Configurator. Also, FakeConfigurator is no longer used, so there is no need for an interface instead of a plain pointer.

Signed-off-by: Aldo Culquicondor <acondor@google.com>
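For context, the pattern the tests move to looks roughly like the sketch below: a pod lister comes straight from a shared informer factory and is filtered for scheduled pods, rather than being fetched through a Configurator accessor such as GetScheduledPodLister(). This is only an illustrative sketch; the package name and the identifiers clientSet, stopCh, scheduledPods and countScheduled are assumptions for the example, not code from this commit.

// Illustrative sketch only: obtain a PodLister directly from a shared informer
// factory, the way the updated tests do, instead of via a Configurator accessor.
// The identifiers in this file are assumed names for the example.
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	listers "k8s.io/client-go/listers/core/v1"
)

// scheduledPods lists all pods from the lister and keeps only those that are
// already bound to a node (i.e. scheduled).
func scheduledPods(lister listers.PodLister) ([]*v1.Pod, error) {
	all, err := lister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	scheduled := make([]*v1.Pod, 0, len(all))
	for _, pod := range all {
		if len(pod.Spec.NodeName) > 0 {
			scheduled = append(scheduled, pod)
		}
	}
	return scheduled, nil
}

// countScheduled wires a lister straight from the informer factory; no
// Configurator accessor is involved.
func countScheduled(clientSet kubernetes.Interface, stopCh <-chan struct{}) (int, error) {
	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
	podLister := informerFactory.Core().V1().Pods().Lister()
	informerFactory.Start(stopCh)
	informerFactory.WaitForCacheSync(stopCh)

	pods, err := scheduledPods(podLister)
	if err != nil {
		return 0, err
	}
	fmt.Printf("%d pods are scheduled\n", len(pods))
	return len(pods), nil
}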
@@ -84,7 +84,6 @@ func createConfiguratorArgsWithPodInformer(
 	stopCh <-chan struct{},
 ) *factory.ConfigFactoryArgs {
 	return &factory.ConfigFactoryArgs{
-		SchedulerName: schedulerName,
 		Client:        clientSet,
 		NodeInformer:  informerFactory.Core().V1().Nodes(),
 		PodInformer:   podInformer,
@@ -42,6 +42,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
         "//test/integration/framework:go_default_library",
@@ -25,7 +25,6 @@ import (
 	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/csi-translation-lib/plugins"
@@ -359,9 +358,9 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
 	if b.N < minPods {
 		b.N = minPods
 	}
-	schedulerConfigFactory, finalFunc := mustSetupScheduler()
+	schedulerConfigArgs, finalFunc := mustSetupScheduler()
 	defer finalFunc()
-	c := schedulerConfigFactory.GetClient()
+	c := schedulerConfigArgs.Client
 
 	nodePreparer := framework.NewIntegrationTestNodePreparer(
 		c,
@@ -378,8 +377,9 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
 	podCreator := testutils.NewTestPodCreator(c, config)
 	podCreator.CreatePods()
 
+	podLister := schedulerConfigArgs.PodInformer.Lister()
 	for {
-		scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
+		scheduled, err := getScheduledPods(podLister)
 		if err != nil {
 			klog.Fatalf("%v", err)
 		}
@@ -397,7 +397,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
 	for {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
+		scheduled, err := getScheduledPods(podLister)
 		if err != nil {
 			klog.Fatalf("%v", err)
 		}
@@ -18,17 +18,19 @@ package benchmark
 
 import (
 	"fmt"
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/scheduler/factory"
-	testutils "k8s.io/kubernetes/test/utils"
 	"math"
 	"strconv"
 	"testing"
 	"time"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/scheduler/factory"
+	testutils "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -101,22 +103,22 @@ func TestSchedule100Node3KPods(t *testing.T) {
 
 // testConfig contains the some input parameters needed for running test-suite
 type testConfig struct {
-	numPods                   int
-	numNodes                  int
-	mutatedNodeTemplate       *v1.Node
-	mutatedPodTemplate        *v1.Pod
-	schedulerSupportFunctions factory.Configurator
-	destroyFunc               func()
+	numPods             int
+	numNodes            int
+	mutatedNodeTemplate *v1.Node
+	mutatedPodTemplate  *v1.Pod
+	schedulerSupport    *factory.ConfigFactoryArgs
+	destroyFunc         func()
 }
 
 // getBaseConfig returns baseConfig after initializing number of nodes and pods.
 func getBaseConfig(nodes int, pods int) *testConfig {
-	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
+	schedulerConfigArgs, destroyFunc := mustSetupScheduler()
 	return &testConfig{
-		schedulerSupportFunctions: schedulerConfigFactory,
-		destroyFunc:               destroyFunc,
-		numNodes:                  nodes,
-		numPods:                   pods,
+		schedulerSupport: schedulerConfigArgs,
+		destroyFunc:      destroyFunc,
+		numNodes:         nodes,
+		numPods:          pods,
 	}
 }
@@ -132,10 +134,11 @@ func schedulePods(config *testConfig) int32 {
 	// We are interested in low scheduling rates (i.e. qps=2),
 	minQPS := int32(math.MaxInt32)
 	start := time.Now()
+	podLister := config.schedulerSupport.PodInformer.Lister()
 	// Bake in time for the first pod scheduling event.
 	for {
 		time.Sleep(50 * time.Millisecond)
-		scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
+		scheduled, err := getScheduledPods(podLister)
 		if err != nil {
 			klog.Fatalf("%v", err)
 		}
@@ -153,7 +156,7 @@ func schedulePods(config *testConfig) int32 {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
+		scheduled, err := getScheduledPods(podLister)
 		if err != nil {
 			klog.Fatalf("%v", err)
 		}
@@ -183,6 +186,20 @@ func schedulePods(config *testConfig) int32 {
 	}
 }
 
+func getScheduledPods(lister listers.PodLister) ([]*v1.Pod, error) {
+	all, err := lister.List(labels.Everything())
+	if err != nil {
+		return nil, err
+	}
+	scheduled := make([]*v1.Pod, 0, len(all))
+	for _, pod := range all {
+		if len(pod.Spec.NodeName) > 0 {
+			scheduled = append(scheduled, pod)
+		}
+	}
+	return scheduled, nil
+}
+
 // mutateNodeTemplate returns the modified node needed for creation of nodes.
 func (na nodeAffinity) mutateNodeTemplate(node *v1.Node) {
 	labels := make(map[string]string)
@@ -220,19 +237,17 @@ func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) {
 // generateNodes generates nodes to be used for scheduling.
 func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) {
 	for i := 0; i < inputConfig.NodeCount; i++ {
-		config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(config.mutatedNodeTemplate)
-
+		config.schedulerSupport.Client.CoreV1().Nodes().Create(config.mutatedNodeTemplate)
 	}
 	for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ {
-		config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(baseNodeTemplate)
-
+		config.schedulerSupport.Client.CoreV1().Nodes().Create(baseNodeTemplate)
 	}
 }
 
 // generatePods generates pods to be used for scheduling.
 func (inputConfig *schedulerPerfConfig) generatePods(config *testConfig) {
-	testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", inputConfig.PodCount, config.mutatedPodTemplate)
-	testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", config.numPods-inputConfig.PodCount, basePodTemplate)
+	testutils.CreatePod(config.schedulerSupport.Client, "sample", inputConfig.PodCount, config.mutatedPodTemplate)
+	testutils.CreatePod(config.schedulerSupport.Client, "sample", config.numPods-inputConfig.PodCount, basePodTemplate)
 }
 
 // generatePodAndNodeTopology is the wrapper function for modifying both pods and node objects.
@@ -31,7 +31,7 @@ import (
 // remove resources after finished.
 // Notes on rate limiter:
 //   - client rate limit is set to 5000.
-func mustSetupScheduler() (factory.Configurator, util.ShutdownFunc) {
+func mustSetupScheduler() (*factory.ConfigFactoryArgs, util.ShutdownFunc) {
 	apiURL, apiShutdown := util.StartApiserver()
 	clientSet := clientset.NewForConfigOrDie(&restclient.Config{
 		Host: apiURL,
@@ -39,11 +39,11 @@ func mustSetupScheduler() (factory.Configurator, util.ShutdownFunc) {
 		QPS:   5000.0,
 		Burst: 5000,
 	})
-	schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet)
+	schedulerConfigArgs, schedulerShutdown := util.StartScheduler(clientSet)
 
 	shutdownFunc := func() {
 		schedulerShutdown()
 		apiShutdown()
 	}
-	return schedulerConfig, shutdownFunc
+	return schedulerConfigArgs, shutdownFunc
 }
@@ -57,9 +57,9 @@ func StartApiserver() (string, ShutdownFunc) {
 }
 
 // StartScheduler configures and starts a scheduler given a handle to the clientSet interface
-// and event broadcaster. It returns a handle to the configurator for the running scheduler
+// and event broadcaster. It returns a handle to the configurator args for the running scheduler
 // and the shutdown function to stop it.
-func StartScheduler(clientSet clientset.Interface) (factory.Configurator, ShutdownFunc) {
+func StartScheduler(clientSet clientset.Interface) (*factory.ConfigFactoryArgs, ShutdownFunc) {
 	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
 	stopCh := make(chan struct{})
 	evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
@@ -67,9 +67,10 @@ func StartScheduler(clientSet clientset.Interface) (factory.Configurator, Shutdo
 
 	evtBroadcaster.StartRecordingToSink(stopCh)
 
-	schedulerConfigurator := createSchedulerConfigurator(clientSet, informerFactory, stopCh)
+	configuratorArgs := createSchedulerConfiguratorArgs(clientSet, informerFactory, stopCh)
+	configurator := factory.NewConfigFactory(configuratorArgs)
 
-	config, err := schedulerConfigurator.CreateFromConfig(schedulerapi.Policy{})
+	config, err := configurator.CreateFromConfig(schedulerapi.Policy{})
 	if err != nil {
 		klog.Fatalf("Error creating scheduler: %v", err)
 	}
@@ -95,18 +96,17 @@ func StartScheduler(clientSet clientset.Interface) (factory.Configurator, Shutdo
 		close(stopCh)
 		klog.Infof("destroyed scheduler")
 	}
-	return schedulerConfigurator, shutdownFunc
+	return configuratorArgs, shutdownFunc
 }
 
-// createSchedulerConfigurator create a configurator for scheduler with given informer factory and default name.
-func createSchedulerConfigurator(
+// createSchedulerConfigurator create a configurator for scheduler with given informer factory.
+func createSchedulerConfiguratorArgs(
 	clientSet clientset.Interface,
 	informerFactory informers.SharedInformerFactory,
 	stopCh <-chan struct{},
-) factory.Configurator {
+) *factory.ConfigFactoryArgs {
 
-	return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
-		SchedulerName: v1.DefaultSchedulerName,
+	return &factory.ConfigFactoryArgs{
 		Client:       clientSet,
 		NodeInformer: informerFactory.Core().V1().Nodes(),
 		PodInformer:  informerFactory.Core().V1().Pods(),
@@ -123,5 +123,5 @@ func createSchedulerConfigurator(
 		DisablePreemption:        false,
 		PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
 		StopCh:                   stopCh,
-	})
+	}
 }