Merge pull request #32316 from deads2k/client-05-remaining-clients

Automatic merge from submit-queue

remove the rest of the non-generated clients from the kubectl code

Die `Client` Die!

It's always bigger than you think. Last bit, @kargakis: after this, it's gone.
Committed by Kubernetes Submit Queue on 2016-09-12 21:45:16 -07:00 (via GitHub)
59 changed files with 1570 additions and 800 deletions
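
The change is mechanical and repeats across every hunk below: call sites that used to hand kubectl a raw unversioned `client.Client` now wrap it in the generated-clientset adapter first. A minimal sketch of the before/after shape in Go, assuming the 1.4-era package layout shown in these diffs (`scalerForJob` is a hypothetical helper for illustration, not a function from this PR):

    package example

    import (
        "k8s.io/kubernetes/pkg/apis/batch"
        client "k8s.io/kubernetes/pkg/client/unversioned"
        clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
        "k8s.io/kubernetes/pkg/kubectl"
    )

    // scalerForJob returns a kubectl Scaler for Jobs. Before this PR the raw
    // unversioned client was passed straight to ScalerFor; now it is adapted
    // to the generated internal clientset that ScalerFor expects.
    func scalerForJob(c *client.Client) (kubectl.Scaler, error) {
        // Before: kubectl.ScalerFor(batch.Kind("Job"), c)
        return kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(c))
    }

`kubectl.ReaperFor` follows the same shape; only the kind argument and the returned interface differ.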


@@ -27,6 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/apis/batch"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("scale job up")
-scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
+scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("scale job down")
-scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
+scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -176,7 +177,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("delete a job")
-reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
+reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 timeout := 1 * time.Minute
 err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))


@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/apis/extensions"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/runtime"
@@ -115,7 +116,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred())
 defer func() {
 framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
-dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
+dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), clientsetadapter.FromUnversionedClient(c))
 Expect(err).NotTo(HaveOccurred())
 err = dsReaper.Stop(ns, dsName, 0, nil)
 Expect(err).NotTo(HaveOccurred())


@@ -195,7 +195,7 @@ func stopDeploymentMaybeOverlap(c *clientset.Clientset, oldC client.Interface, n
 Expect(err).NotTo(HaveOccurred())
 framework.Logf("Deleting deployment %s", deploymentName)
-reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
+reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c)
 Expect(err).NotTo(HaveOccurred())
 timeout := 1 * time.Minute
 err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
@@ -1114,10 +1114,10 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name)
 Expect(err).NotTo(HaveOccurred())
-firstCond := client.ReplicaSetHasDesiredReplicas(f.Client.Extensions(), first)
+firstCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), first)
 wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
-secondCond := client.ReplicaSetHasDesiredReplicas(f.Client.Extensions(), second)
+secondCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), second)
 wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
 By(fmt.Sprintf("Updating the size (up) and template at the same time for deployment %q", deploymentName))
@@ -1175,10 +1175,10 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 newRs, err := deploymentutil.GetNewReplicaSet(deployment, c)
 Expect(err).NotTo(HaveOccurred())
-oldCond := client.ReplicaSetHasDesiredReplicas(f.Client.Extensions(), oldRs)
+oldCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), oldRs)
 wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
-newCond := client.ReplicaSetHasDesiredReplicas(f.Client.Extensions(), newRs)
+newCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), newRs)
 wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
 By(fmt.Sprintf("Updating the size (down) and template at the same time for deployment %q", deploymentName))
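
Note that only the client argument changes in these polling conditions: the helper still lives in the legacy `client` package, but it now reads ReplicaSets through the generated clientset `c` used by the rest of the test rather than the framework's legacy `f.Client`. A condensed sketch of the new call shape (`rs` and `c` stand in for the test's locals):

    // Poll until rs reports its desired replica count, reading via the clientset.
    cond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), rs)
    wait.PollImmediate(10*time.Millisecond, 1*time.Minute, cond)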


@@ -56,6 +56,7 @@ import (
 "k8s.io/kubernetes/pkg/client/typed/discovery"
 "k8s.io/kubernetes/pkg/client/typed/dynamic"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -3197,7 +3198,7 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) {
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
-scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
+scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(c))
 if err != nil {
 return err
 }
@@ -3314,7 +3315,7 @@ func DeleteRCAndPods(c *client.Client, ns, name string) error {
 }
 return err
 }
-reaper, err := kubectl.ReaperForReplicationController(c, 10*time.Minute)
+reaper, err := kubectl.ReaperForReplicationController(clientsetadapter.FromUnversionedClient(c).Core(), 10*time.Minute)
 if err != nil {
 if apierrs.IsNotFound(err) {
 Logf("RC %s was already deleted: %v", name, err)
@@ -3462,7 +3463,7 @@ func DeleteReplicaSet(c *client.Client, ns, name string) error {
 }
 return err
 }
-reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
+reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientsetadapter.FromUnversionedClient(c))
 if err != nil {
 if apierrs.IsNotFound(err) {
 Logf("ReplicaSet %s was already deleted: %v", name, err)
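
One call site above departs from the blanket pattern: `ReaperForReplicationController` now takes just the typed core client rather than the whole clientset, so the adapted client is narrowed with `.Core()` before the call. A sketch of the resulting shape (names follow the hunk; the exact parameter type is an assumption):

    // c is the legacy unversioned *client.Client used throughout the framework.
    rcClient := clientsetadapter.FromUnversionedClient(c).Core()
    reaper, err := kubectl.ReaperForReplicationController(rcClient, 10*time.Minute)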


@@ -23,6 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/apis/batch"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -124,7 +125,7 @@ var _ = framework.KubeDescribe("Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("scale job up")
-scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
+scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -149,7 +150,7 @@ var _ = framework.KubeDescribe("Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("scale job down")
-scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
+scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -172,7 +173,7 @@ var _ = framework.KubeDescribe("Job", func() {
 Expect(err).NotTo(HaveOccurred())
 By("delete a job")
-reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
+reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
 Expect(err).NotTo(HaveOccurred())
 timeout := 1 * time.Minute
 err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))


@@ -47,6 +47,7 @@ import (
 "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/api/unversioned"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 "k8s.io/kubernetes/pkg/labels"
@@ -420,7 +421,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 ExecOrDie()
 Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
 f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
-runTestPod, _, err := util.GetFirstPod(c, ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, f)
+runTestPod, _, err := util.GetFirstPod(clientsetadapter.FromUnversionedClient(c), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, f)
 if err != nil {
 os.Exit(1)
 }


@@ -42,6 +42,7 @@ import (
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/client/restclient"
 client "k8s.io/kubernetes/pkg/client/unversioned"
+clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/controller"
 replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 "k8s.io/kubernetes/pkg/fields"
@@ -290,7 +291,7 @@ func RCFromManifest(fileName string) *api.ReplicationController {
 // StopRC stops the rc via kubectl's stop library
 func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
-reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), restClient)
+reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
 if err != nil || reaper == nil {
 return err
 }
@@ -303,7 +304,7 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
 // ScaleRC scales the given rc to the given replicas.
 func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.ReplicationController, error) {
-scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), restClient)
+scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
 if err != nil {
 return nil, err
 }