Add a simple cloud provider for e2e tests on kubemark
This is needed for the cluster autoscaler e2e tests to run on kubemark. We need the ability to add and remove nodes and to operate on node groups; kubemark does not provide this at the moment.
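
For orientation, a hedged sketch of how a cluster autoscaler e2e test could drive the new provider through the framework helpers extended further down in this diff (ResizeGroup, GroupSize). The node group name, the polling intervals, and the helper name resizeAndWait are made up for illustration and are not part of the commit.

// Sketch only: grow a kubemark node group via the framework helpers and wait
// until the group reports the requested size.
package sketch

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/test/e2e/framework"
)

// resizeAndWait is a hypothetical helper; the group name is whatever node group
// the kubemark setup registered with the KubemarkController.
func resizeAndWait(group string, target int) error {
    // With the kubemark provider this dispatches to KubemarkController.SetNodeGroupSize.
    if err := framework.ResizeGroup(group, int32(target)); err != nil {
        return err
    }
    // Poll until GroupSize (KubemarkController.GetNodeGroupSize for kubemark)
    // reflects the new target.
    return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
        size, err := framework.GroupSize(group)
        if err != nil {
            return false, err
        }
        return size == target, nil
    })
}
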
@@ -68,6 +68,7 @@ go_library(
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/sysctl:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubemark:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/ssh:go_default_library",
        "//pkg/util/file:go_default_library",

@@ -128,6 +129,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",

@@ -37,12 +37,15 @@ import (
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    staging "k8s.io/client-go/kubernetes"
    clientreporestclient "k8s.io/client-go/rest"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/kubemark"
    "k8s.io/kubernetes/test/e2e/framework/metrics"
    testutils "k8s.io/kubernetes/test/utils"

@@ -95,6 +98,8 @@ type Framework struct {
    // Place where various additional data is stored during test run to be printed to ReportDir,
    // or stdout if ReportDir is not set once test ends.
    TestSummaries []TestDataSummary

    kubemarkControllerCloseChannel chan struct{}
}

type TestDataSummary interface {

@@ -190,6 +195,23 @@ func (f *Framework) BeforeEach() {
        f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
        Expect(err).NotTo(HaveOccurred())
        f.ClientPool = dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
        if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
            externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
            externalConfig.QPS = f.Options.ClientQPS
            externalConfig.Burst = f.Options.ClientBurst
            Expect(err).NotTo(HaveOccurred())
            externalClient, err := clientset.NewForConfig(externalConfig)
            Expect(err).NotTo(HaveOccurred())
            f.kubemarkControllerCloseChannel = make(chan struct{})
            externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
            kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
            kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
            go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
            TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
            Expect(err).NotTo(HaveOccurred())
            externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
            TestContext.CloudConfig.KubemarkController.Init(f.kubemarkControllerCloseChannel)
        }
    }

    if !f.SkipNamespaceCreation {
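
As a hedged aside (not part of the diff): once the block above has run, the controller is stored in TestContext.CloudConfig, so a test or helper can use it directly without re-wiring the informers. The function name and the group name below are illustrative; SetNodeGroupSize, GetNodeGroupSize and GetNodeNamesForNodegroup are the controller methods the provider hooks later in this diff rely on.

// Sketch only: direct use of the KubemarkController stored by BeforeEach.
package sketch

import (
    "fmt"

    "k8s.io/kubernetes/test/e2e/framework"
)

func describeKubemarkGroup(group string) error {
    ctl := framework.TestContext.CloudConfig.KubemarkController
    if ctl == nil {
        // Only set when running with the kubemark provider and an external kubeconfig.
        return fmt.Errorf("kubemark controller is not initialized")
    }
    size, err := ctl.GetNodeGroupSize(group)
    if err != nil {
        return err
    }
    names, err := ctl.GetNodeNamesForNodegroup(group)
    if err != nil {
        return err
    }
    fmt.Printf("node group %q: size=%d, hollow nodes=%v\n", group, size, names)
    return nil
}
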

@@ -342,6 +364,10 @@ func (f *Framework) AfterEach() {
        }
    }

    if TestContext.CloudConfig.KubemarkController != nil {
        close(f.kubemarkControllerCloseChannel)
    }

    PrintSummaries(f.TestSummaries, f.BaseName)

    // Check whether all nodes are ready after the test.

@@ -51,6 +51,8 @@ func ResizeGroup(group string, size int32) error {
    } else if TestContext.Provider == "aws" {
        client := autoscaling.New(session.New())
        return awscloud.ResizeInstanceGroup(client, group, int(size))
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.SetNodeGroupSize(group, int(size))
    } else {
        return fmt.Errorf("Provider does not support InstanceGroups")
    }

@@ -72,6 +74,8 @@ func GetGroupNodes(group string) ([]string, error) {
            lines[i] = line[:strings.Index(line, " ")]
        }
        return lines, nil
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.GetNodeNamesForNodegroup(group)
    } else {
        return nil, fmt.Errorf("provider does not support InstanceGroups")
    }

@@ -99,6 +103,8 @@ func GroupSize(group string) (int, error) {
            return -1, fmt.Errorf("instance group not found: %s", group)
        }
        return instanceGroup.CurrentSize()
    } else if TestContext.Provider == "kubemark" {
        return TestContext.CloudConfig.KubemarkController.GetNodeGroupSize(group)
    } else {
        return -1, fmt.Errorf("provider does not support InstanceGroups")
    }
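
A hedged usage note (not part of the commit): because GroupSize and GetGroupNodes now both answer from the KubemarkController for this provider, a test can cross-check them cheaply. The helper below is illustrative only.

// Sketch only: sanity-check that the reported group size matches the listed members.
package sketch

import (
    "fmt"

    "k8s.io/kubernetes/test/e2e/framework"
)

func checkGroupConsistency(group string) error {
    size, err := framework.GroupSize(group)
    if err != nil {
        return err
    }
    names, err := framework.GetGroupNodes(group)
    if err != nil {
        return err
    }
    if len(names) != size {
        return fmt.Errorf("node group %q reports size %d but lists %d members: %v",
            group, size, len(names), names)
    }
    return nil
}
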

@@ -27,17 +27,19 @@ import (
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/kubernetes/pkg/apis/componentconfig"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/kubemark"
)

const defaultHost = "http://127.0.0.1:8080"

type TestContextType struct {
    KubeConfig string
    KubeContext string
    KubeAPIContentType string
    KubeVolumeDir string
    CertDir string
    Host string
    KubeConfig string
    KubemarkExternalKubeConfig string
    KubeContext string
    KubeAPIContentType string
    KubeVolumeDir string
    CertDir string
    Host string
    // TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
    RepoRoot string
    DockershimCheckpointDir string

@@ -158,7 +160,8 @@ type CloudConfig struct {
    NodeTag string
    MasterTag string

    Provider cloudprovider.Interface
    Provider cloudprovider.Interface
    KubemarkController *kubemark.KubemarkController
}

var TestContext TestContextType

@@ -201,6 +204,7 @@ func RegisterCommonFlags() {
func RegisterClusterFlags() {
    flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
    flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
    flag.StringVar(&TestContext.KubemarkExternalKubeConfig, fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
    flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
    flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
    flag.StringVar(&TestContext.FederatedKubeContext, "federated-kube-context", "e2e-federation", "kubeconfig context for federation.")