The changes introduced in this commit plumb the generic scaler into kubectl.

Note that the behaviour of kubectl does not change yet.
For example, it will not scale new resources; that is the end goal.
The first step is to retrofit the existing code to use the generic scaler.
p0lyn0mial
2018-01-04 14:52:25 +01:00
parent 1a817b1507
commit dd9de90b0a
22 changed files with 177 additions and 100 deletions
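
For background, the generic scaler referred to in the message is the polymorphic scale client from k8s.io/client-go/scale: rather than one scaler implementation per kind, it resolves a resource's scale subresource through API discovery, so a single code path can resize ReplicationControllers, Deployments, ReplicaSets, and anything else that serves /scale. A minimal sketch of how a caller uses it (the resize helper and package name are hypothetical; the method signatures are those of client-go from this era, before the scale client took a context.Context):

package scaledemo

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// resize sets the replica count of any scalable resource. Note that it
// needs no kind-specific client; the resource's GroupResource is the only
// kind-specific input.
func resize(scales scaleclient.ScalesGetter, namespace, name string, gr schema.GroupResource, replicas int32) error {
	// Fetch the current /scale subresource of the named object.
	scale, err := scales.Scales(namespace).Get(gr, name)
	if err != nil {
		return fmt.Errorf("reading scale of %v %q: %v", gr, name, err)
	}
	// Change only the desired replica count and write it back.
	scale.Spec.Replicas = replicas
	_, err = scales.Scales(namespace).Update(gr, scale)
	return err
}

Everything kind-specific is reduced to that GroupResource argument, which is why the test utilities below start passing one around.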

test/e2e/scalability/BUILD

@@ -1,9 +1,4 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
 
 go_library(
     name = "go_default_library",
@@ -14,7 +9,9 @@ go_library(
         "load.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/scalability",
+    visibility = ["//visibility:public"],
     deps = [
+        "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/batch:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
@@ -26,6 +23,7 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
@@ -38,8 +36,12 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//vendor/k8s.io/client-go/discovery:go_default_library",
+        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
+        "//vendor/k8s.io/client-go/dynamic:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/scale:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/transport:go_default_library",
         "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
@@ -57,4 +59,5 @@ filegroup(
     name = "all-srcs",
     srcs = [":package-srcs"],
     tags = ["automanaged"],
+    visibility = ["//visibility:public"],
 )

test/e2e/scalability/density.go

@@ -528,7 +528,7 @@ var _ = SIGDescribe("Density", func() {
 		podThroughput := 20
 		timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
 		// createClients is defined in load.go
-		clients, internalClients, err := createClients(numberOfCollections)
+		clients, internalClients, scalesClients, err := createClients(numberOfCollections)
 		for i := 0; i < numberOfCollections; i++ {
 			nsName := namespaces[i].Name
 			secretNames := []string{}
@@ -559,6 +559,7 @@
 			baseConfig := &testutils.RCConfig{
 				Client:         clients[i],
 				InternalClient: internalClients[i],
+				ScalesGetter:   scalesClients[i],
 				Image:          framework.GetPauseImageName(f.ClientSet),
 				Name:           name,
 				Namespace:      nsName,
@@ -590,7 +591,7 @@
 		}
 		// Single client is running out of http2 connections in delete phase, hence we need more.
-		clients, internalClients, err = createClients(2)
+		clients, internalClients, _, err = createClients(2)
 		dConfig := DensityTestConfig{
 			ClientSets: clients,

test/e2e/scalability/load.go

@@ -28,14 +28,18 @@ import (
 	"time"
 
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/discovery"
+	cacheddiscovery "k8s.io/client-go/discovery/cached"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
+	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/transport"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/apis/batch"
@@ -48,6 +52,8 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
 )
 
 const (
@@ -309,9 +315,11 @@ var _ = SIGDescribe("Load capacity", func() {
 	}
 })
 
-func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, error) {
+func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, []scaleclient.ScalesGetter, error) {
 	clients := make([]clientset.Interface, numberOfClients)
 	internalClients := make([]internalclientset.Interface, numberOfClients)
+	scalesClients := make([]scaleclient.ScalesGetter, numberOfClients)
+
 	for i := 0; i < numberOfClients; i++ {
 		config, err := framework.LoadConfig()
 		Expect(err).NotTo(HaveOccurred())
@@ -327,11 +335,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
 		// each client here.
 		transportConfig, err := config.TransportConfig()
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		tlsConfig, err := transport.TLSConfigFor(transportConfig)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		config.Transport = utilnet.SetTransportDefaults(&http.Transport{
 			Proxy: http.ProxyFromEnvironment,
@@ -349,16 +357,37 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
 		c, err := clientset.NewForConfig(config)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		clients[i] = c
 		internalClient, err := internalclientset.NewForConfig(config)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		internalClients[i] = internalClient
+
+		// create scale client, if GroupVersion or NegotiatedSerializer are not set
+		// assign default values - these fields are mandatory (required by RESTClientFor).
+		if config.GroupVersion == nil {
+			config.GroupVersion = &schema.GroupVersion{}
+		}
+		if config.NegotiatedSerializer == nil {
+			config.NegotiatedSerializer = legacyscheme.Codecs
+		}
+		restClient, err := restclient.RESTClientFor(config)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		discoClient, err := discovery.NewDiscoveryClientForConfig(config)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
+		restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
+		resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
+		scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
 	}
-	return clients, internalClients, nil
+	return clients, internalClients, scalesClients, nil
 }
 
 func computePodCounts(total int) (int, int, int) {
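
The added block in createClients is the heart of the change: the generic scale client is assembled from the same rest.Config used for the typed clients. A discovery client, wrapped in a memory cache, feeds both a deferred REST mapper and a scale-kind resolver; together they let the client map an arbitrary resource to the right version of its scale subresource. Pulled out of the loop, the recipe reads roughly as follows (the helper and package names are hypothetical; the calls mirror the added lines above):

package scaledemo

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	cacheddiscovery "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/dynamic"
	restclient "k8s.io/client-go/rest"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
)

// newScalesGetter builds a generic scale client from a rest.Config.
func newScalesGetter(config *restclient.Config) (scaleclient.ScalesGetter, error) {
	// RESTClientFor rejects configs without these fields, so default them.
	if config.GroupVersion == nil {
		config.GroupVersion = &schema.GroupVersion{}
	}
	if config.NegotiatedSerializer == nil {
		config.NegotiatedSerializer = legacyscheme.Codecs
	}
	restClient, err := restclient.RESTClientFor(config)
	if err != nil {
		return nil, err
	}
	discoClient, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return nil, err
	}
	// Cache discovery results in memory so that scaling in a tight loop
	// does not hammer the API server with repeated discovery requests.
	cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
	// The deferred mapper resolves kinds to resources lazily, on first use;
	// the resolver discovers which group/version serves each /scale subresource.
	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
	resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
	return scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
}

Sharing one cached discovery client between the mapper and the resolver is the design choice worth noting: both consumers see the same snapshot of the API surface, and discovery is paid for once per client rather than once per scale call.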
@@ -405,12 +434,13 @@ func generateConfigs(
 	// Create a number of clients to better simulate real usecase
 	// where not everyone is using exactly the same client.
 	rcsPerClient := 20
-	clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
+	clients, internalClients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
 	framework.ExpectNoError(err)
 
 	for i := 0; i < len(configs); i++ {
 		configs[i].SetClient(clients[i%len(clients)])
 		configs[i].SetInternalClient(internalClients[i%len(internalClients)])
+		configs[i].SetScalesClient(scalesClients[i%len(clients)])
 	}
 	for i := 0; i < len(secretConfigs); i++ {
 		secretConfigs[i].Client = clients[i%len(clients)]
@@ -590,7 +620,16 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
 	sleepUpTo(scalingTime)
 	newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
 	framework.ExpectNoError(framework.ScaleResource(
-		config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()),
+		config.GetClient(),
+		config.GetInternalClient(),
+		config.GetScalesGetter(),
+		config.GetNamespace(),
+		config.GetName(),
+		newSize,
+		true,
+		config.GetKind(),
+		config.GetGroupResource(),
+	),
 		fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName()))
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
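
This widened call is where the plumbing pays off: framework.ScaleResource now receives the ScalesGetter and the target's GroupResource alongside the typed clients and the kind, which is what allows the framework to hand scaling off to the generic scale client instead of kind-specific scaler code. The accessors also imply that the testutils.RunObjectConfig interface grew two methods; a compilable sketch inferred from this call site (not verbatim from the testutils package):

package scaledemo

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// RunObjectConfig sketches only the two accessors this call site relies on;
// the real testutils interface has many more methods (GetClient, GetKind,
// GetNamespace, and so on, as used above).
type RunObjectConfig interface {
	GetScalesGetter() scaleclient.ScalesGetter // the generic scale client plumbed in above
	GetGroupResource() schema.GroupResource    // resource whose /scale subresource is updated
}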