Merge pull request #34905 from ingvagabund/client-to-clientset

Automatic merge from submit-queue

Replace client with clientset

Replace client with clientset in some places

Fixes: #34637
This commit is contained in:
Kubernetes Submit Queue
2016-10-24 05:38:43 -07:00
committed by GitHub
137 changed files with 1762 additions and 1794 deletions

View File

@@ -32,8 +32,8 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@@ -47,7 +47,7 @@ func TestClient(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("client", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -60,7 +60,7 @@ func TestClient(t *testing.T) {
t.Errorf("expected %#v, got %#v", e, a)
}
pods, err := client.Pods(ns.Name).List(api.ListOptions{})
pods, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -83,14 +83,14 @@ func TestClient(t *testing.T) {
},
}
got, err := client.Pods(ns.Name).Create(pod)
got, err := client.Core().Pods(ns.Name).Create(pod)
if err == nil {
t.Fatalf("unexpected non-error: %v", got)
}
// get a created pod
pod.Spec.Containers[0].Image = "an-image"
got, err = client.Pods(ns.Name).Create(pod)
got, err = client.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -99,7 +99,7 @@ func TestClient(t *testing.T) {
}
// pod is shown, but not scheduled
pods, err = client.Pods(ns.Name).List(api.ListOptions{})
pods, err = client.Core().Pods(ns.Name).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -119,14 +119,14 @@ func TestAtomicPut(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("atomic-put", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
rcBody := api.ReplicationController{
TypeMeta: unversioned.TypeMeta{
APIVersion: c.APIVersion().String(),
APIVersion: c.Core().RESTClient().APIVersion().String(),
},
ObjectMeta: api.ObjectMeta{
Name: "atomicrc",
@@ -154,7 +154,7 @@ func TestAtomicPut(t *testing.T) {
},
},
}
rcs := c.ReplicationControllers(ns.Name)
rcs := c.Core().ReplicationControllers(ns.Name)
rc, err := rcs.Create(&rcBody)
if err != nil {
t.Fatalf("Failed creating atomicRC: %v", err)
@@ -211,7 +211,7 @@ func TestPatch(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("patch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -220,7 +220,7 @@ func TestPatch(t *testing.T) {
resource := "pods"
podBody := api.Pod{
TypeMeta: unversioned.TypeMeta{
APIVersion: c.APIVersion().String(),
APIVersion: c.Core().RESTClient().APIVersion().String(),
},
ObjectMeta: api.ObjectMeta{
Name: name,
@@ -233,7 +233,7 @@ func TestPatch(t *testing.T) {
},
},
}
pods := c.Pods(ns.Name)
pods := c.Core().Pods(ns.Name)
pod, err := pods.Create(&podBody)
if err != nil {
t.Fatalf("Failed creating patchpods: %v", err)
@@ -263,10 +263,10 @@ func TestPatch(t *testing.T) {
},
}
pb := patchBodies[c.APIVersion()]
pb := patchBodies[c.Core().RESTClient().APIVersion()]
execPatch := func(pt api.PatchType, body []byte) error {
return c.Patch(pt).
return c.Core().RESTClient().Patch(pt).
Resource(resource).
Namespace(ns.Name).
Name(name).
@@ -320,7 +320,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("patch-with-create", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -339,7 +339,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
}
patchEndpoint := func(json []byte) (runtime.Object, error) {
return c.Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
return c.Core().RESTClient().Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
}
// Make sure patch doesn't get to CreateOnUpdate
@@ -354,7 +354,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
}
// Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version
createdEndpoint, err := c.Endpoints(ns.Name).Update(endpointTemplate)
createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate)
if err != nil {
t.Fatalf("Failed creating endpoint: %v", err)
}
@@ -431,10 +431,10 @@ func TestAPIVersions(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientVersion := c.APIVersion().String()
g, err := c.ServerGroups()
clientVersion := c.Core().RESTClient().APIVersion().String()
g, err := c.Discovery().ServerGroups()
if err != nil {
t.Fatalf("Failed to get api versions: %v", err)
}
@@ -456,7 +456,7 @@ func TestSingleWatch(t *testing.T) {
ns := framework.CreateTestingNamespace("single-watch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
mkEvent := func(i int) *api.Event {
name := fmt.Sprintf("event-%v", i)
@@ -476,7 +476,7 @@ func TestSingleWatch(t *testing.T) {
rv1 := ""
for i := 0; i < 10; i++ {
event := mkEvent(i)
got, err := client.Events(ns.Name).Create(event)
got, err := client.Core().Events(ns.Name).Create(event)
if err != nil {
t.Fatalf("Failed creating event %#q: %v", event, err)
}
@@ -489,7 +489,7 @@ func TestSingleWatch(t *testing.T) {
t.Logf("Created event %#v", got.ObjectMeta)
}
w, err := client.Get().
w, err := client.Core().RESTClient().Get().
Prefix("watch").
Namespace(ns.Name).
Resource("events").
@@ -541,7 +541,7 @@ func TestMultiWatch(t *testing.T) {
ns := framework.CreateTestingNamespace("multi-watch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
dummyEvent := func(i int) *api.Event {
name := fmt.Sprintf("unrelated-%v", i)
@@ -570,7 +570,7 @@ func TestMultiWatch(t *testing.T) {
for i := 0; i < watcherCount; i++ {
watchesStarted.Add(1)
name := fmt.Sprintf("multi-watch-%v", i)
got, err := client.Pods(ns.Name).Create(&api.Pod{
got, err := client.Core().Pods(ns.Name).Create(&api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels.Set{"watchlabel": name},
@@ -591,7 +591,7 @@ func TestMultiWatch(t *testing.T) {
LabelSelector: labels.Set{"watchlabel": name}.AsSelector(),
ResourceVersion: rv,
}
w, err := client.Pods(ns.Name).Watch(options)
w, err := client.Core().Pods(ns.Name).Watch(options)
if err != nil {
panic(fmt.Sprintf("watch error for %v: %v", name, err))
}
@@ -640,7 +640,7 @@ func TestMultiWatch(t *testing.T) {
if !ok {
return
}
if _, err := client.Events(ns.Name).Create(dummyEvent(i)); err != nil {
if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil {
panic(fmt.Sprintf("couldn't make an event: %v", err))
}
changeMade <- i
@@ -677,7 +677,7 @@ func TestMultiWatch(t *testing.T) {
return
}
name := fmt.Sprintf("unrelated-%v", i)
_, err := client.Pods(ns.Name).Create(&api.Pod{
_, err := client.Core().Pods(ns.Name).Create(&api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
},
@@ -711,13 +711,13 @@ func TestMultiWatch(t *testing.T) {
for i := 0; i < watcherCount; i++ {
go func(i int) {
name := fmt.Sprintf("multi-watch-%v", i)
pod, err := client.Pods(ns.Name).Get(name)
pod, err := client.Core().Pods(ns.Name).Get(name)
if err != nil {
panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
}
pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
sentTimes <- timePair{time.Now(), name}
if _, err := client.Pods(ns.Name).Update(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
}
}(i)
@@ -740,7 +740,7 @@ func TestMultiWatch(t *testing.T) {
t.Errorf("durations: %v", dur)
}
func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string) {
func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
podBody := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "selflinktest",
@@ -755,20 +755,20 @@ func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string
},
},
}
pod, err := c.Pods(namespace).Create(&podBody)
pod, err := c.Core().Pods(namespace).Create(&podBody)
if err != nil {
t.Fatalf("Failed creating selflinktest pod: %v", err)
}
if err = c.Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil {
if err = c.Core().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil {
t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
}
podList, err := c.Pods(namespace).List(api.ListOptions{})
podList, err := c.Core().Pods(namespace).List(api.ListOptions{})
if err != nil {
t.Errorf("Failed listing pods: %v", err)
}
if err = c.Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil {
if err = c.Core().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil {
t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err)
}
@@ -779,7 +779,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string
continue
}
found = true
err = c.Get().RequestURI(item.SelfLink).Do().Into(pod)
err = c.Core().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod)
if err != nil {
t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err)
}
@@ -797,7 +797,7 @@ func TestSelfLinkOnNamespace(t *testing.T) {
ns := framework.CreateTestingNamespace("selflink", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
runSelfLinkTestOnNamespace(t, c, ns.Name)
}

View File

@@ -27,9 +27,9 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
uclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -47,7 +47,7 @@ func TestDynamicClient(t *testing.T) {
ContentConfig: restclient.ContentConfig{GroupVersion: gv},
}
client := uclient.NewOrDie(config)
client := clientset.NewForConfigOrDie(config)
dynamicClient, err := dynamic.NewClient(config)
_ = dynamicClient
if err != nil {
@@ -87,7 +87,7 @@ func TestDynamicClient(t *testing.T) {
},
}
actual, err := client.Pods(ns.Name).Create(pod)
actual, err := client.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("unexpected error when creating pod: %v", err)
}
@@ -136,7 +136,7 @@ func TestDynamicClient(t *testing.T) {
t.Fatalf("unexpected error when deleting pod: %v", err)
}
list, err := client.Pods(ns.Name).List(api.ListOptions{})
list, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}

View File

@@ -25,8 +25,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -36,7 +36,7 @@ func TestConfigMap(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("config-map", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -44,7 +44,7 @@ func TestConfigMap(t *testing.T) {
DoTestConfigMap(t, client, ns)
}
func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) {
func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace) {
cfg := api.ConfigMap{
ObjectMeta: api.ObjectMeta{
Name: "configmap",
@@ -57,7 +57,7 @@ func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) {
},
}
if _, err := client.ConfigMaps(cfg.Namespace).Create(&cfg); err != nil {
if _, err := client.Core().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil {
t.Errorf("unable to create test configMap: %v", err)
}
defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)
@@ -112,14 +112,14 @@ func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) {
}
pod.ObjectMeta.Name = "uses-configmap"
if _, err := client.Pods(ns.Name).Create(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
func deleteConfigMapOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.ConfigMaps(ns).Delete(name); err != nil {
func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
if err := c.Core().ConfigMaps(ns).Delete(name, nil); err != nil {
t.Errorf("unable to delete ConfigMap %v: %v", name, err)
}
}

View File

@@ -49,7 +49,6 @@ import (
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/generated/openapi"
@@ -90,7 +89,7 @@ type MasterComponents struct {
// Kubernetes master, contains an embedded etcd storage
KubeMaster *master.Master
// Restclient used to talk to the kubernetes master
RestClient *client.Client
ClientSet clientset.Interface
// Replication controller manager
ControllerManager *replicationcontroller.ReplicationManager
// Channel for stop signals to rc manager
@@ -117,7 +116,6 @@ func NewMasterComponents(c *Config) *MasterComponents {
// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
glog.Infof("Master %+v", s.URL)
// TODO: caesarxuchao: remove this client when the refactoring of client library is done.
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
rcStopCh := make(chan struct{})
controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)
@@ -130,7 +128,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
return &MasterComponents{
ApiServer: s,
KubeMaster: m,
RestClient: restClient,
ClientSet: clientset,
ControllerManager: controllerManager,
rcStopCh: rcStopCh,
}

View File

@@ -27,7 +27,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/integration/framework"
@@ -98,7 +98,7 @@ func getIterations(bN int) int {
}
// startPodsOnNodes creates numPods sharded across numNodes
func startPodsOnNodes(ns string, numPods, numNodes int, restClient *client.Client) {
func startPodsOnNodes(ns string, numPods, numNodes int, restClient clientset.Interface) {
podsPerNode := numPods / numNodes
if podsPerNode < 1 {
podsPerNode = 1
@@ -137,7 +137,7 @@ func BenchmarkPodList(b *testing.B) {
defer func() {
glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now))
}()
if pods, err := m.RestClient.Pods(ns.Name).List(api.ListOptions{
if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
LabelSelector: labels.Everything(),
FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host),
}); err != nil {
@@ -180,7 +180,7 @@ func BenchmarkPodListEtcd(b *testing.B) {
defer func() {
glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now))
}()
pods, err := m.RestClient.Pods(ns.Name).List(api.ListOptions{
pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
LabelSelector: labels.Everything(),
FieldSelector: fields.Everything(),
})

View File

@@ -36,8 +36,8 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
@@ -387,10 +387,10 @@ func TestMasterService(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
return false, nil
@@ -403,7 +403,7 @@ func TestMasterService(t *testing.T) {
}
}
if found {
ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
if err != nil {
return false, nil
}
@@ -429,7 +429,7 @@ func TestServiceAlloc(t *testing.T) {
_, s := framework.RunAMaster(cfg)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
svc := func(i int) *api.Service {
return &api.Service{
@@ -447,7 +447,7 @@ func TestServiceAlloc(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.Services(api.NamespaceDefault).Get("kubernetes")
_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes")
if err != nil && !errors.IsNotFound(err) {
return false, err
}
@@ -457,17 +457,17 @@ func TestServiceAlloc(t *testing.T) {
}
// Make a service.
if _, err := client.Services(api.NamespaceDefault).Create(svc(1)); err != nil {
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(1)); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// Make a second service. It will fail because we're out of cluster IPs
if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(2)); err != nil {
if !strings.Contains(err.Error(), "range is full") {
t.Errorf("unexpected error text: %v", err)
}
} else {
svcs, err := client.Services(api.NamespaceAll).List(api.ListOptions{})
svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected success, and error getting the services: %v", err)
}
@@ -479,12 +479,12 @@ func TestServiceAlloc(t *testing.T) {
}
// Delete the first service.
if err := client.Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name); err != nil {
if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// This time creating the second service should work.
if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil {
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(2)); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
}

View File

@@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/integration/framework"
"github.com/golang/glog"
@@ -108,8 +108,8 @@ func TestApiserverMetrics(t *testing.T) {
// Make a request to the apiserver to ensure there's at least one data point
// for the metrics we're expecting -- otherwise, they won't be exported.
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
if _, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
if _, err := client.Core().Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
t.Fatalf("unexpected error getting pods: %v", err)
}

View File

@@ -24,8 +24,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/genericapiserver"
etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
@@ -38,7 +38,7 @@ func TestIgnoreClusterName(t *testing.T) {
_, s := framework.RunAMaster(config)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
etcdClient := framework.NewEtcdClient()
etcdStorage := etcdstorage.NewEtcdStorage(etcdClient, testapi.Default.Codec(),
prefix+"/namespaces/", false, etcdtest.DeserializationCacheSize)
@@ -50,7 +50,7 @@ func TestIgnoreClusterName(t *testing.T) {
ClusterName: "cluster-name-to-ignore",
},
}
nsNew, err := client.Namespaces().Create(&ns)
nsNew, err := client.Core().Namespaces().Create(&ns)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsNew.Name)
assert.Empty(t, nsNew.ClusterName)
@@ -61,7 +61,7 @@ func TestIgnoreClusterName(t *testing.T) {
assert.Equal(t, ns.Name, nsEtcd.Name)
assert.Empty(t, nsEtcd.ClusterName)
nsNew, err = client.Namespaces().Update(&ns)
nsNew, err = client.Core().Namespaces().Update(&ns)
assert.Nil(t, err)
assert.Equal(t, ns.Name, nsNew.Name)
assert.Empty(t, nsNew.ClusterName)

View File

@@ -24,8 +24,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -37,7 +37,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
var (
iZero = int64(0)
@@ -130,13 +130,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
pod.Spec.ActiveDeadlineSeconds = tc.original
pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)
if _, err := client.Pods(ns.Name).Create(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
pod.Spec.ActiveDeadlineSeconds = tc.update
_, err := client.Pods(ns.Name).Update(pod)
_, err := client.Core().Pods(ns.Name).Update(pod)
if tc.valid && err != nil {
t.Errorf("%v: failed to update pod: %v", tc.name, err)
} else if !tc.valid && err == nil {
@@ -155,7 +155,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
@@ -174,7 +174,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
},
}
if _, err := client.Pods(ns.Name).Create(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}

View File

@@ -37,7 +37,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
@@ -197,7 +196,6 @@ func TestSchedulerExtender(t *testing.T) {
ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
extender1 := &Extender{
@@ -252,13 +250,13 @@ func TestSchedulerExtender(t *testing.T) {
defer close(schedulerConfig.StopEverything)
DoTestPodScheduling(ns, t, restClient)
DoTestPodScheduling(ns, t, clientSet)
}
func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Client) {
func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface) {
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
goodCondition := api.NodeCondition{
Type: api.NodeReady,
@@ -278,7 +276,7 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Cli
for ii := 0; ii < 5; ii++ {
node.Name = fmt.Sprintf("machine%d", ii+1)
if _, err := restClient.Nodes().Create(node); err != nil {
if _, err := cs.Core().Nodes().Create(node); err != nil {
t.Fatalf("Failed to create nodes: %v", err)
}
}
@@ -286,21 +284,21 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Cli
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
},
}
myPod, err := restClient.Pods(ns.Name).Create(pod)
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Fatalf("Failed to schedule pod: %v", err)
}
if myPod, err := restClient.Pods(ns.Name).Get(myPod.Name); err != nil {
if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name); err != nil {
t.Fatalf("Failed to get pod: %v", err)
} else if myPod.Spec.NodeName != "machine3" {
t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)

View File

@@ -32,9 +32,9 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
@@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/test/integration/framework"
)
type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c *client.Client)
type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c clientset.Interface)
type nodeStateManager struct {
makeSchedulable nodeMutationFunc
@@ -57,7 +57,6 @@ func TestUnschedulableNodes(t *testing.T) {
ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
@@ -67,17 +66,17 @@ func TestUnschedulableNodes(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
scheduler.New(schedulerConfig).Run()
defer close(schedulerConfig.StopEverything)
DoTestUnschedulableNodes(t, restClient, ns, schedulerConfigFactory.NodeLister.Store)
DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.NodeLister.Store)
}
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Pods(podNamespace).Get(podName)
pod, err := c.Core().Pods(podNamespace).Get(podName)
if errors.IsNotFound(err) {
return false, nil
}
@@ -121,10 +120,10 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
return err
}
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.Namespace, nodeStore cache.Store) {
func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Namespace, nodeStore cache.Store) {
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
goodCondition := api.NodeCondition{
Type: api.NodeReady,
@@ -167,9 +166,9 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
nodeModifications := []nodeStateManager{
// Test node.Spec.Unschedulable=true/false
{
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
n.Spec.Unschedulable = true
if _, err := c.Nodes().Update(n); err != nil {
if _, err := c.Core().Nodes().Update(n); err != nil {
t.Fatalf("Failed to update node with unschedulable=true: %v", err)
}
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
@@ -183,9 +182,9 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
}
},
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
n.Spec.Unschedulable = false
if _, err := c.Nodes().Update(n); err != nil {
if _, err := c.Core().Nodes().Update(n); err != nil {
t.Fatalf("Failed to update node with unschedulable=false: %v", err)
}
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
@@ -198,14 +197,14 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
},
// Test node.Status.Conditions=ConditionTrue/Unknown
{
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
n.Status = api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
},
Conditions: []api.NodeCondition{badCondition},
}
if _, err = c.Nodes().UpdateStatus(n); err != nil {
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
t.Fatalf("Failed to update node with bad status condition: %v", err)
}
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
@@ -215,14 +214,14 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
}
},
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
n.Status = api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
},
Conditions: []api.NodeCondition{goodCondition},
}
if _, err = c.Nodes().UpdateStatus(n); err != nil {
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
t.Fatalf("Failed to update node with healthy status condition: %v", err)
}
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
@@ -236,29 +235,29 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
}
for i, mod := range nodeModifications {
unSchedNode, err := restClient.Nodes().Create(node)
unSchedNode, err := cs.Core().Nodes().Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Apply the unschedulable modification to the node, and wait for the reflection
mod.makeUnSchedulable(t, unSchedNode, nodeStore, restClient)
mod.makeUnSchedulable(t, unSchedNode, nodeStore, cs)
// Create the new pod, note that this needs to happen post unschedulable
// modification or we have a race in the test.
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
},
}
myPod, err := restClient.Pods(ns.Name).Create(pod)
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
// There are no schedulable nodes - the pod shouldn't be scheduled.
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
if err == nil {
t.Errorf("Pod scheduled successfully on unschedulable nodes")
}
@@ -269,25 +268,25 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N
}
// Apply the schedulable modification to the node, and wait for the reflection
schedNode, err := restClient.Nodes().Get(unSchedNode.Name)
schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name)
if err != nil {
t.Fatalf("Failed to get node: %v", err)
}
mod.makeSchedulable(t, schedNode, nodeStore, restClient)
mod.makeSchedulable(t, schedNode, nodeStore, cs)
// Wait until the pod is scheduled.
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
} else {
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
err = restClient.Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0))
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = restClient.Nodes().Delete(schedNode.Name)
err = cs.Core().Nodes().Delete(schedNode.Name, nil)
if err != nil {
t.Errorf("Failed to delete node: %v", err)
}
@@ -323,12 +322,11 @@ func TestMultiScheduler(t *testing.T) {
- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
*/
// 1. create and start default-scheduler
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
@@ -337,7 +335,7 @@ func TestMultiScheduler(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
scheduler.New(schedulerConfig).Run()
// default-scheduler will be stopped later
@@ -351,25 +349,25 @@ func TestMultiScheduler(t *testing.T) {
},
},
}
restClient.Nodes().Create(node)
clientSet.Core().Nodes().Create(node)
// 3. create 3 pods for testing
podWithNoAnnotation := createPod(restClient, "pod-with-no-annotation", nil)
testPodNoAnnotation, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation)
podWithNoAnnotation := createPod(clientSet, "pod-with-no-annotation", nil)
testPodNoAnnotation, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
podWithAnnotationFitsDefault := createPod(restClient, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
testPodWithAnnotationFitsDefault, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault)
podWithAnnotationFitsDefault := createPod(clientSet, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
testPodWithAnnotationFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
podWithAnnotationFitsFoo := createPod(restClient, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
testPodWithAnnotationFitsFoo, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsFoo)
podWithAnnotationFitsFoo := createPod(clientSet, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
testPodWithAnnotationFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsFoo)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -377,21 +375,21 @@ func TestMultiScheduler(t *testing.T) {
// 4. **check point-1**:
// - testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
// - testPodWithAnnotationFitsFoo should NOT be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name))
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodNoAnnotation.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodNoAnnotation.Name)
}
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name))
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodWithAnnotationFitsDefault.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault.Name)
}
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
if err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
} else {
@@ -399,7 +397,6 @@ func TestMultiScheduler(t *testing.T) {
}
// 5. create and start a scheduler with name "foo-scheduler"
restClient2 := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
@@ -409,14 +406,14 @@ func TestMultiScheduler(t *testing.T) {
}
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"})
eventBroadcaster2.StartRecordingToSink(restClient2.Events(ns.Name))
eventBroadcaster2.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
scheduler.New(schedulerConfig2).Run()
defer close(schedulerConfig2.StopEverything)
// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
} else {
@@ -424,11 +421,11 @@ func TestMultiScheduler(t *testing.T) {
}
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
err = restClient.Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = restClient.Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
@@ -446,24 +443,24 @@ func TestMultiScheduler(t *testing.T) {
// - note: these two pods belong to default scheduler which no longer exists
podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
testPodNoAnnotation2, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation2)
testPodNoAnnotation2, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation2)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
testPodWithAnnotationFitsDefault2, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
testPodWithAnnotationFitsDefault2, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
// 9. **check point-3**:
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
if err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name)
}
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
if err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err)
} else {
@@ -472,7 +469,7 @@ func TestMultiScheduler(t *testing.T) {
*/
}
func createPod(client *client.Client, name string, annotation map[string]string) *api.Pod {
func createPod(client clientset.Interface, name string, annotation map[string]string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
Spec: api.PodSpec{
@@ -490,12 +487,11 @@ func TestAllocatable(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)
// 1. create and start default-scheduler
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
@@ -504,7 +500,7 @@ func TestAllocatable(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
scheduler.New(schedulerConfig).Run()
// default-scheduler will be stopped later
defer close(schedulerConfig.StopEverything)
@@ -522,7 +518,7 @@ func TestAllocatable(t *testing.T) {
},
}
allocNode, err := restClient.Nodes().Create(node)
allocNode, err := clientSet.Core().Nodes().Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -534,7 +530,7 @@ func TestAllocatable(t *testing.T) {
Containers: []api.Container{
{
Name: "container",
Image: e2e.GetPauseImageName(restClient),
Image: e2e.GetPauseImageName(clientSet),
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
@@ -546,13 +542,13 @@ func TestAllocatable(t *testing.T) {
},
}
testAllocPod, err := restClient.Pods(ns.Name).Create(podResource)
testAllocPod, err := clientSet.Core().Pods(ns.Name).Create(podResource)
if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
}
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod.Namespace, testAllocPod.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod.Namespace, testAllocPod.Name))
if err != nil {
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
} else {
@@ -573,23 +569,23 @@ func TestAllocatable(t *testing.T) {
},
}
if _, err := restClient.Nodes().UpdateStatus(allocNode); err != nil {
if _, err := clientSet.Core().Nodes().UpdateStatus(allocNode); err != nil {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}
if err := restClient.Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
t.Fatalf("Failed to remove first resource pod: %v", err)
}
// 6. Make another pod with different name, same resource request
podResource.ObjectMeta.Name = "pod-test-allocatable2"
testAllocPod2, err := restClient.Pods(ns.Name).Create(podResource)
testAllocPod2, err := clientSet.Core().Pods(ns.Name).Create(podResource)
if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
}
// 7. Test: this test pod should not be scheduled since it request more than Allocatable
err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod2.Namespace, testAllocPod2.Name))
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name))
if err == nil {
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectly, %v", testAllocPod2.Name, err)
} else {

View File

@@ -25,14 +25,14 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
func deleteSecretOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.Secrets(ns).Delete(name); err != nil {
// deleteSecretOrErrorf removes the secret `name` from namespace `ns`.
// A failed delete is reported as a non-fatal test error so cleanup of the
// remaining resources can still proceed.
func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
	err := c.Core().Secrets(ns).Delete(name, nil)
	if err == nil {
		return
	}
	t.Errorf("unable to delete secret %v: %v", name, err)
}
@@ -42,7 +42,7 @@ func TestSecrets(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("secret", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -51,7 +51,7 @@ func TestSecrets(t *testing.T) {
}
// DoTestSecrets test secrets for one api version.
func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) {
func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace) {
// Make a secret object.
s := api.Secret{
ObjectMeta: api.ObjectMeta{
@@ -63,7 +63,7 @@ func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) {
},
}
if _, err := client.Secrets(s.Namespace).Create(&s); err != nil {
if _, err := client.Core().Secrets(s.Namespace).Create(&s); err != nil {
t.Errorf("unable to create test secret: %v", err)
}
defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)
@@ -103,14 +103,14 @@ func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) {
// Create a pod to consume secret.
pod.ObjectMeta.Name = "uses-secret"
if _, err := client.Pods(ns.Name).Create(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
// Create a pod that consumes non-existent secret.
pod.ObjectMeta.Name = "uses-non-existent-secret"
if _, err := client.Pods(ns.Name).Create(pod); err != nil {
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)

View File

@@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -41,7 +41,7 @@ func TestStorageClasses(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("storageclass", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -50,7 +50,7 @@ func TestStorageClasses(t *testing.T) {
}
// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace) {
func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Namespace) {
// Make a storage class object.
s := storage.StorageClass{
TypeMeta: unversioned.TypeMeta{
@@ -83,20 +83,20 @@ func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace
}
pvc.ObjectMeta.Name = "uses-storageclass"
if _, err := client.PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
if _, err := client.Core().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
t.Errorf("Failed to create pvc: %v", err)
}
defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name)
}
func deleteStorageClassOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.Storage().StorageClasses().Delete(name); err != nil {
// deleteStorageClassOrErrorf deletes the StorageClass `name`, reporting a
// non-fatal test error on failure. The ns argument is unused here (storage
// classes are cluster-scoped) but keeps the signature parallel with the
// other delete helpers in this file.
func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
	err := c.Storage().StorageClasses().Delete(name, nil)
	if err != nil {
		t.Errorf("unable to delete storage class %v: %v", name, err)
	}
}
func deletePersistentVolumeClaimOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.PersistentVolumeClaims(ns).Delete(name); err != nil {
// deletePersistentVolumeClaimOrErrorf deletes the PVC `name` in namespace
// `ns`, recording a non-fatal test error if the API call fails.
func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
	err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil)
	if err == nil {
		return
	}
	t.Errorf("unable to delete persistent volume claim %v: %v", name, err)
}

View File

@@ -26,8 +26,8 @@ import (
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -55,8 +55,8 @@ func withEtcdKey(f func(string)) {
f(prefix)
}
func DeletePodOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.Pods(ns).Delete(name, nil); err != nil {
// DeletePodOrErrorf deletes the pod `name` in namespace `ns`. Deletion
// failure is reported via t.Errorf (non-fatal) rather than aborting the test.
func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
	err := c.Core().Pods(ns).Delete(name, nil)
	if err != nil {
		t.Errorf("unable to delete pod %v: %v", name, err)
	}
}