Add a simple cloud provider for e2e tests on kubemark

This is needed for the cluster autoscaler e2e tests to
run on kubemark. They need the ability to add and
remove nodes and to operate on node groups, which
kubemark does not provide at the moment.
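As an illustration of the intended use, here is a hypothetical test helper that resizes a kubemark node group once the framework has populated TestContext.CloudConfig.KubemarkController (see the BeforeEach hunk below). The package name and the GetNodeGroupSize/SetNodeGroupSize method names are assumptions about the KubemarkController API, not part of this commit.

package autoscaling // hypothetical e2e test package

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// growKubemarkNodeGroup sketches how a cluster autoscaler e2e test could add
// hollow nodes through the new provider. The GetNodeGroupSize/SetNodeGroupSize
// method names are assumptions, not taken from this commit.
func growKubemarkNodeGroup(nodeGroup string, delta int) error {
	kc := framework.TestContext.CloudConfig.KubemarkController
	if kc == nil {
		return fmt.Errorf("kubemark controller not initialized; provider must be kubemark")
	}
	size, err := kc.GetNodeGroupSize(nodeGroup) // assumed accessor
	if err != nil {
		return err
	}
	return kc.SetNodeGroupSize(nodeGroup, size+delta) // assumed mutator
}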
Beata Skiba
2017-07-20 15:05:25 +02:00
parent fddc7f3e50
commit 2f747f3d3c
7 changed files with 397 additions and 7 deletions


@@ -37,12 +37,15 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
staging "k8s.io/client-go/kubernetes"
clientreporestclient "k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubemark"
"k8s.io/kubernetes/test/e2e/framework/metrics"
testutils "k8s.io/kubernetes/test/utils"
@@ -95,6 +98,8 @@ type Framework struct {
	// Place where various additional data is stored during test run to be printed to ReportDir,
	// or stdout if ReportDir is not set once test ends.
	TestSummaries []TestDataSummary

	kubemarkControllerCloseChannel chan struct{}
}

type TestDataSummary interface {
@@ -190,6 +195,23 @@ func (f *Framework) BeforeEach() {
		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
		Expect(err).NotTo(HaveOccurred())
		f.ClientPool = dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
		if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
			externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
			Expect(err).NotTo(HaveOccurred())
			externalConfig.QPS = f.Options.ClientQPS
			externalConfig.Burst = f.Options.ClientBurst
			externalClient, err := clientset.NewForConfig(externalConfig)
			Expect(err).NotTo(HaveOccurred())
			f.kubemarkControllerCloseChannel = make(chan struct{})
			externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
			kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
			kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
			go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
			TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
			Expect(err).NotTo(HaveOccurred())
			externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
			TestContext.CloudConfig.KubemarkController.Init(f.kubemarkControllerCloseChannel)
		}
	}

	if !f.SkipNamespaceCreation {
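For reference, a minimal self-contained sketch (outside the diff) of the kubeconfig → clientset → shared-informer wiring that the BeforeEach hunk performs for the external kubemark cluster. The package name, helper name, and 30s resync period are illustrative assumptions; the client-go calls themselves (clientcmd.BuildConfigFromFlags, kubernetes.NewForConfig, informers.NewSharedInformerFactory) are the same ones used above.

package e2ehelpers // illustrative package name

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// buildExternalNodeInformer mirrors the wiring above: build a client for the
// external (kubemark master) cluster from a kubeconfig path, register a node
// informer, and start the factory. Everything stops once the returned channel
// is closed.
func buildExternalNodeInformer(kubeconfig string) (kubernetes.Interface, informers.SharedInformerFactory, chan struct{}, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, nil, nil, err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, nil, nil, err
	}
	stopCh := make(chan struct{})
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	factory.Core().V1().Nodes().Informer() // register the node informer before Start
	factory.Start(stopCh)                  // runs all registered informers until stopCh is closed
	return client, factory, stopCh, nil
}

Closing the returned channel plays the same role as f.kubemarkControllerCloseChannel in the AfterEach hunk below.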
@@ -342,6 +364,10 @@ func (f *Framework) AfterEach() {
		}
	}

	if TestContext.CloudConfig.KubemarkController != nil {
		close(f.kubemarkControllerCloseChannel)
	}

	PrintSummaries(f.TestSummaries, f.BaseName)

	// Check whether all nodes are ready after the test.