diff --git a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go index 702cb123071..91610191270 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go +++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go @@ -25,7 +25,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/restclient" ) func flattenSubsets(subsets []api.EndpointSubset) []string { @@ -42,14 +43,19 @@ func main() { flag.Parse() glog.Info("Kubernetes Elasticsearch logging discovery") - c, err := client.NewInCluster() + cc, err := restclient.InClusterConfig() + if err != nil { + glog.Fatalf("Failed to make client: %v", err) + } + client, err := clientset.NewForConfig(cc) + if err != nil { glog.Fatalf("Failed to make client: %v", err) } namespace := api.NamespaceSystem envNamespace := os.Getenv("NAMESPACE") if envNamespace != "" { - if _, err := c.Namespaces().Get(envNamespace); err != nil { + if _, err := client.Core().Namespaces().Get(envNamespace); err != nil { glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err) } namespace = envNamespace @@ -59,7 +65,7 @@ func main() { // Look for endpoints associated with the Elasticsearch loggging service. // First wait for the service to become available. for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - elasticsearch, err = c.Services(namespace).Get("elasticsearch-logging") + elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging") if err == nil { break } @@ -76,7 +82,7 @@ func main() { // Wait for some endpoints. 
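The hunk above replaces the one-shot client.NewInCluster() with two steps: build an in-cluster rest config, then construct the generated internal clientset from it, reaching resources through group accessors such as Core(). A minimal sketch of that pattern, using only calls that appear in this diff (the kube-system lookup at the end is illustrative; error handling is collapsed into glog.Fatalf as in the file above):

package main

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Build the rest config from the service-account token and environment
	// variables injected into every pod, then create the typed clientset.
	cc, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to build in-cluster config: %v", err)
	}
	client, err := clientset.NewForConfig(cc)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}
	// Resources now hang off group accessors rather than the client itself.
	if _, err := client.Core().Namespaces().Get(api.NamespaceSystem); err != nil {
		glog.Fatalf("kube-system namespace not reachable: %v", err)
	}
}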
count := 0 for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - endpoints, err = c.Endpoints(namespace).Get("elasticsearch-logging") + endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging") if err != nil { continue } diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 1f9674debaf..bf257a4ac04 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -30,8 +30,9 @@ import ( "k8s.io/kubernetes/cmd/kube-proxy/app/options" "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - kubeclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/proxy" @@ -56,7 +57,7 @@ import ( ) type ProxyServer struct { - Client *kubeclient.Client + Client clientset.Interface Config *options.ProxyServerConfig IptInterface utiliptables.Interface Proxier proxy.ProxyProvider @@ -82,7 +83,7 @@ func checkKnownProxyMode(proxyMode string) bool { } func NewProxyServer( - client *kubeclient.Client, + client clientset.Interface, config *options.ProxyServerConfig, iptInterface utiliptables.Interface, proxier proxy.ProxyProvider, @@ -185,7 +186,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err kubeconfig.QPS = config.KubeAPIQPS kubeconfig.Burst = int(config.KubeAPIBurst) - client, err := kubeclient.New(kubeconfig) + client, err := clientset.NewForConfig(kubeconfig) if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } @@ -198,7 +199,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err var proxier proxy.ProxyProvider var endpointsHandler proxyconfig.EndpointsConfigHandler - proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{}) + proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{}) if proxyMode == proxyModeIPTables { glog.V(0).Info("Using iptables Proxier.") if config.IPTablesMasqueradeBit == nil { @@ -251,7 +252,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err endpointsConfig.RegisterHandler(endpointsHandler) proxyconfig.NewSourceAPI( - client, + client.Core().RESTClient(), config.ConfigSyncPeriod, serviceConfig.Channel("api"), endpointsConfig.Channel("api"), @@ -281,7 +282,7 @@ func (s *ProxyServer) Run() error { return nil } - s.Broadcaster.StartRecordingToSink(s.Client.Events("")) + s.Broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: s.Client.Core().Events("")}) // Start up a webserver if requested if s.Config.HealthzPort > 0 { @@ -418,9 +419,9 @@ func (s *ProxyServer) birthCry() { s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.") } -func getNodeIP(client *kubeclient.Client, hostname string) net.IP { +func getNodeIP(client clientset.Interface, hostname string) net.IP { var nodeIP net.IP - node, err := client.Nodes().Get(hostname) + node, err := client.Core().Nodes().Get(hostname) if err != nil { glog.Warningf("Failed to retrieve node info: %v", err) return nil diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go index 
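The kube-proxy changes above swap *kubeclient.Client for clientset.Interface; because the event broadcaster still expects a record.EventSink, the typed events client is wrapped in unversionedcore.EventSinkImpl, and raw list/watch consumers receive Core().RESTClient() instead of the whole client. A sketch of the event wiring only (the helper name is hypothetical):

package example

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	"k8s.io/kubernetes/pkg/client/record"
)

// startEventRecording adapts the typed events client ("" = all namespaces)
// to the sink interface the broadcaster expects, as the ProxyServer now does.
func startEventRecording(client clientset.Interface, broadcaster record.EventBroadcaster) {
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")})
}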
b05f035b168..9a9ffd7062b 100644 --- a/cmd/kubemark/hollow-node.go +++ b/cmd/kubemark/hollow-node.go @@ -24,7 +24,6 @@ import ( _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -94,10 +93,7 @@ func main() { if err != nil { glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err) } - cl, err := client.New(clientConfig) - if err != nil { - glog.Fatalf("Failed to create a Client: %v. Exiting.", err) - } + clientset, err := internalclientset.NewForConfig(clientConfig) if err != nil { glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err) @@ -136,7 +132,7 @@ func main() { endpointsConfig := proxyconfig.NewEndpointsConfig() endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{}) - hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder) + hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, clientset, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder) hollowProxy.Run() } } diff --git a/federation/pkg/federation-controller/util/cluster_util.go b/federation/pkg/federation-controller/util/cluster_util.go index b0b9838116e..5374ef16068 100644 --- a/federation/pkg/federation-controller/util/cluster_util.go +++ b/federation/pkg/federation-controller/util/cluster_util.go @@ -26,8 +26,8 @@ import ( federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" utilnet "k8s.io/kubernetes/pkg/util/net" @@ -102,14 +102,18 @@ var KubeconfigGetterForSecret = func(secretName string) clientcmd.KubeconfigGett return nil, fmt.Errorf("unexpected: POD_NAMESPACE env var returned empty string") } // Get a client to talk to the k8s apiserver, to fetch secrets from it. 
- client, err := client.NewInCluster() + cc, err := restclient.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("error in creating in-cluster client: %s", err) + } + client, err := clientset.NewForConfig(cc) if err != nil { return nil, fmt.Errorf("error in creating in-cluster client: %s", err) } data = []byte{} var secret *api.Secret err = wait.PollImmediate(1*time.Second, getSecretTimeout, func() (bool, error) { - secret, err = client.Secrets(namespace).Get(secretName) + secret, err = client.Core().Secrets(namespace).Get(secretName) if err == nil { return true, nil } diff --git a/pkg/client/cache/listwatch_test.go b/pkg/client/cache/listwatch_test.go index a1fe083fd48..b141ecd67a4 100644 --- a/pkg/client/cache/listwatch_test.go +++ b/pkg/client/cache/listwatch_test.go @@ -25,8 +25,8 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" utiltesting "k8s.io/kubernetes/pkg/util/testing" ) @@ -98,8 +98,8 @@ func TestListWatchesCanList(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. lw.List(api.ListOptions{}) handler.ValidateRequest(t, item.location, "GET", nil) @@ -164,8 +164,8 @@ func TestListWatchesCanWatch(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. 
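The listwatch test changes show the other half of the same idea: helpers that used to take *client.Client as a REST getter now take the rest client extracted from a typed group. A sketch of building a pod ListWatch that way (the resource, namespace parameter, everything-selector, and helper name are placeholders, not from the diff):

package example

import (
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
)

// podListWatch feeds list/watch requests for pods through the core group's
// REST client, mirroring NewListWatchFromClient(client.Core().RESTClient(), ...).
func podListWatch(client clientset.Interface, namespace string) *cache.ListWatch {
	return cache.NewListWatchFromClient(client.Core().RESTClient(), "pods", namespace, fields.Everything())
}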
lw.Watch(api.ListOptions{ResourceVersion: item.rv}) handler.ValidateRequest(t, item.location, "GET", nil) diff --git a/pkg/kubemark/hollow_proxy.go b/pkg/kubemark/hollow_proxy.go index a51b9f7ae02..f8d8f9187f1 100644 --- a/pkg/kubemark/hollow_proxy.go +++ b/pkg/kubemark/hollow_proxy.go @@ -22,8 +22,8 @@ import ( proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app" "k8s.io/kubernetes/cmd/kube-proxy/app/options" "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/record" - client "k8s.io/kubernetes/pkg/client/unversioned" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" @@ -51,7 +51,7 @@ func (*FakeProxier) SyncLoop() { func NewHollowProxyOrDie( nodeName string, - client *client.Client, + client clientset.Interface, endpointsConfig *proxyconfig.EndpointsConfig, serviceConfig *proxyconfig.ServiceConfig, iptInterface utiliptables.Interface, @@ -69,7 +69,7 @@ func NewHollowProxyOrDie( Namespace: "", } proxyconfig.NewSourceAPI( - client, + client.Core().RESTClient(), 30*time.Second, serviceConfig.Channel("api"), endpointsConfig.Channel("api"), diff --git a/pkg/metrics/api_server_metrics.go b/pkg/metrics/api_server_metrics.go index 72f57588e7e..9f897eb7f21 100644 --- a/pkg/metrics/api_server_metrics.go +++ b/pkg/metrics/api_server_metrics.go @@ -36,7 +36,7 @@ func parseApiServerMetrics(data string) (ApiServerMetrics, error) { } func (g *MetricsGrabber) getMetricsFromApiServer() (string, error) { - rawOutput, err := g.client.Get().RequestURI("/metrics").Do().Raw() + rawOutput, err := g.client.Core().RESTClient().Get().RequestURI("/metrics").Do().Raw() if err != nil { return "", err } diff --git a/pkg/metrics/generic_metrics.go b/pkg/metrics/generic_metrics.go index 10c1273c9b0..f17629a0613 100644 --- a/pkg/metrics/generic_metrics.go +++ b/pkg/metrics/generic_metrics.go @@ -99,7 +99,7 @@ func parseMetrics(data string, output *Metrics) error { } func (g *MetricsGrabber) getMetricsFromPod(podName string, namespace string, port int) (string, error) { - rawOutput, err := g.client.Get(). + rawOutput, err := g.client.Core().RESTClient().Get(). Prefix("proxy"). Namespace(namespace). Resource("pods"). diff --git a/pkg/metrics/kubelet_metrics.go b/pkg/metrics/kubelet_metrics.go index 0324db13e44..2f1b9525cb9 100644 --- a/pkg/metrics/kubelet_metrics.go +++ b/pkg/metrics/kubelet_metrics.go @@ -65,7 +65,7 @@ func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (s var err error var rawOutput []byte go func() { - rawOutput, err = g.client.Get(). + rawOutput, err = g.client.Core().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)). 
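The metrics grabber hunks show the pattern for raw, untyped requests: pull the REST client out of Core() and keep the request-builder chain unchanged. A condensed sketch of the apiserver /metrics scrape above (helper name assumed):

package example

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// scrapeAPIServerMetrics issues the same raw GET against /metrics that
// getMetricsFromApiServer performs, returning the body as a string.
func scrapeAPIServerMetrics(c clientset.Interface) (string, error) {
	raw, err := c.Core().RESTClient().Get().RequestURI("/metrics").Do().Raw()
	if err != nil {
		return "", err
	}
	return string(raw), nil
}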
diff --git a/pkg/metrics/metrics_grabber.go b/pkg/metrics/metrics_grabber.go index 8b80d21fdf1..61dfccc7949 100644 --- a/pkg/metrics/metrics_grabber.go +++ b/pkg/metrics/metrics_grabber.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util/system" @@ -41,7 +41,7 @@ type MetricsCollection struct { } type MetricsGrabber struct { - client *client.Client + client clientset.Interface grabFromApiServer bool grabFromControllerManager bool grabFromKubelets bool @@ -50,10 +50,10 @@ type MetricsGrabber struct { registeredMaster bool } -func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) { +func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) { registeredMaster := false masterName := "" - nodeList, err := c.Nodes().List(api.ListOptions{}) + nodeList, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { return nil, err } @@ -85,7 +85,7 @@ func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controll } func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { - nodes, err := g.client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector()}) + nodes, err := g.client.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector()}) if err != nil { return KubeletMetrics{}, err } @@ -166,7 +166,7 @@ func (g *MetricsGrabber) Grab() (MetricsCollection, error) { } if g.grabFromKubelets { result.KubeletMetrics = make(map[string]KubeletMetrics) - nodes, err := g.client.Nodes().List(api.ListOptions{}) + nodes, err := g.client.Core().Nodes().List(api.ListOptions{}) if err != nil { errs = append(errs, err) } else { diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go index c953ffe4dc5..d3182c9ea37 100644 --- a/test/e2e/addon_update.go +++ b/test/e2e/addon_update.go @@ -26,7 +26,7 @@ import ( "golang.org/x/crypto/ssh" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -275,8 +275,8 @@ var _ = framework.KubeDescribe("Addon update", func() { sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1)) - waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", true) - waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", true) + waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", true) + waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", true) By("update manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2)) @@ -289,38 +289,38 @@ var _ = framework.KubeDescribe("Addon update", func() { * But it is ok - as long as we don't have rolling update, the result will be the same */ - waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", true) - waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", true) + waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", true) + waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", true) - waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", false) - waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", false) + waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", false) + waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", false) By("remove manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2)) - waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", false) - waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", false) + waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", false) + waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", false) By("verify invalid API addons weren't created") - _, err = f.Client.ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1") + _, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) - _, err = f.Client.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1") + _, err = f.ClientSet.Core().ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) - _, err = f.Client.Services(f.Namespace.Name).Get("ivalid-addon-test") + _, err = f.ClientSet.Core().Services(f.Namespace.Name).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) - _, err = f.Client.Services(defaultNsName).Get("ivalid-addon-test") + _, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) // invalid addons will be deleted by the deferred function }) }) -func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) { +func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } -func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist 
bool) { +func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) { framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go index b35b520bef9..ce060acaf48 100644 --- a/test/e2e/autoscaling_utils.go +++ b/test/e2e/autoscaling_utils.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -97,7 +97,7 @@ cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer { - runServiceAndWorkloadForResourceConsumer(f.Client, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit) + runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit) rc := &ResourceConsumer{ name: name, controllerName: name + "-ctrl", @@ -199,7 +199,7 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() { } func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.controllerName). @@ -214,7 +214,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { // sendConsumeMemRequest sends POST request for memory consumption func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.controllerName). @@ -229,7 +229,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { // sendConsumeCustomMetric sends POST request for custom metric consumption func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.framework.Namespace.Name). Name(rc.controllerName). 
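The addon-update helpers now accept clientset.Interface, which is the signature change repeated throughout the e2e suite. A rough sketch of what such a wait helper can look like with the typed client; the name, polling parameters, and not-found handling here are assumptions, not the framework.WaitForService implementation:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForServiceExists polls the core services client until the named
// service is (or is no longer) present.
func waitForServiceExists(c clientset.Interface, ns, name string, shouldExist bool, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		_, err := c.Core().Services(ns).Get(name)
		switch {
		case err == nil:
			return shouldExist, nil
		case errors.IsNotFound(err):
			return !shouldExist, nil
		default:
			return false, err
		}
	})
}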
@@ -246,21 +246,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { func (rc *ResourceConsumer) GetReplicas() int { switch rc.kind { case kindRC: - replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name) + replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name) framework.ExpectNoError(err) if replicationController == nil { framework.Failf(rcIsNil) } return int(replicationController.Status.Replicas) case kindDeployment: - deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name) + deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name) framework.ExpectNoError(err) if deployment == nil { framework.Failf(deploymentIsNil) } return int(deployment.Status.Replicas) case kindReplicaSet: - rs, err := rc.framework.Client.ReplicaSets(rc.framework.Namespace.Name).Get(rc.name) + rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name) framework.ExpectNoError(err) if rs == nil { framework.Failf(rsIsNil) @@ -303,15 +303,15 @@ func (rc *ResourceConsumer) CleanUp() { rc.stopCustomMetric <- 0 // Wait some time to ensure all child goroutines are finished. time.Sleep(10 * time.Second) - framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name)) - framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name)) - framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName)) - framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.controllerName)) + framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name)) + framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil)) + framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName)) + framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil)) } -func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) { +func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) { By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.Services(ns).Create(&api.Service{ + _, err := c.Core().Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -364,7 +364,7 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = c.Services(ns).Create(&api.Service{ + _, err = c.Core().Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: controllerName, }, diff --git a/test/e2e/batch_v1_jobs.go b/test/e2e/batch_v1_jobs.go index daa99e2aff4..4e3d4b18c41 100644 --- a/test/e2e/batch_v1_jobs.go +++ b/test/e2e/batch_v1_jobs.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/apis/batch" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset 
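GetReplicas above is a compact map of where workload resources land after the migration: replication controllers stay in Core(), while deployments and replica sets move behind Extensions(). A sketch of the same dispatch with plain string kinds standing in for the test's kind constants (helper name is illustrative):

package example

import (
	"fmt"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// currentReplicas reads status replicas through the group-typed clients.
func currentReplicas(c clientset.Interface, ns, name, kind string) (int32, error) {
	switch kind {
	case "ReplicationController":
		rc, err := c.Core().ReplicationControllers(ns).Get(name)
		if err != nil {
			return 0, err
		}
		return rc.Status.Replicas, nil
	case "Deployment":
		d, err := c.Extensions().Deployments(ns).Get(name)
		if err != nil {
			return 0, err
		}
		return d.Status.Replicas, nil
	case "ReplicaSet":
		rs, err := c.Extensions().ReplicaSets(ns).Get(name)
		if err != nil {
			return 0, err
		}
		return rs.Status.Replicas, nil
	}
	return 0, fmt.Errorf("unknown kind %q", kind)
}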
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" @@ -54,11 +54,11 @@ var _ = framework.KubeDescribe("V1Job", func() { It("should run a job to completion when tasks succeed", func() { By("Creating a job") job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) @@ -73,11 +73,11 @@ var _ = framework.KubeDescribe("V1Job", func() { // due to successive failures too likely with a reasonable // test timeout. job := newTestV1Job("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) @@ -91,23 +91,23 @@ var _ = framework.KubeDescribe("V1Job", func() { // run due to some slowness, 1 in 2^15 chance of happening, // causing test flake. Should be very rare. job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) It("should keep restarting failed pods", func() { By("Creating a job") job := newTestV1Job("fail", "all-fail", api.RestartPolicyNever, parallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job shows many failures") err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { - curr, err := getV1Job(f.Client, f.Namespace.Name, job.Name) + curr, err := getV1Job(f.ClientSet, f.Namespace.Name, job.Name) if err != nil { return false, err } @@ -120,11 +120,11 @@ var _ = framework.KubeDescribe("V1Job", func() { endParallelism := int32(2) By("Creating a job") job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") - err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism) + err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") @@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("V1Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") - err = 
waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism) + err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) @@ -145,11 +145,11 @@ var _ = framework.KubeDescribe("V1Job", func() { endParallelism := int32(1) By("Creating a job") job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") - err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism) + err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job down") @@ -161,18 +161,18 @@ var _ = framework.KubeDescribe("V1Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") - err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism) + err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should delete a job", func() { By("Creating a job") job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == parallelism") - err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, parallelism) + err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, parallelism) Expect(err).NotTo(HaveOccurred()) By("delete a job") @@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("V1Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring job was deleted") - _, err = getV1Job(f.Client, f.Namespace.Name, job.Name) + _, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name) Expect(err).To(HaveOccurred()) Expect(errors.IsNotFound(err)).To(BeTrue()) }) @@ -193,21 +193,21 @@ var _ = framework.KubeDescribe("V1Job", func() { job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) activeDeadlineSeconds := int64(10) job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - job, err := createV1Job(f.Client, f.Namespace.Name, job) + job, err := createV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job was failed") - err = waitForV1JobFail(f.Client, f.Namespace.Name, job.Name, 20*time.Second) + err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second) if err == wait.ErrWaitTimeout { - job, err = getV1Job(f.Client, f.Namespace.Name, job.Name) + job, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name) Expect(err).NotTo(HaveOccurred()) // the job stabilized and won't be synced until modification or full // resync happens, we don't want to wait for the latter so we force // sync modifying it job.Spec.Parallelism = &completions - job, err = updateV1Job(f.Client, f.Namespace.Name, job) + job, err = updateV1Job(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) - err = waitForV1JobFail(f.Client, f.Namespace.Name, job.Name, v1JobTimeout) + err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, v1JobTimeout) } Expect(err).NotTo(HaveOccurred()) }) @@ -275,28 +275,28 @@ func newTestV1Job(behavior, name string, rPol api.RestartPolicy, 
parallelism, co return job } -func getV1Job(c *client.Client, ns, name string) (*batch.Job, error) { +func getV1Job(c clientset.Interface, ns, name string) (*batch.Job, error) { return c.Batch().Jobs(ns).Get(name) } -func createV1Job(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { +func createV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) { return c.Batch().Jobs(ns).Create(job) } -func updateV1Job(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { +func updateV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) { return c.Batch().Jobs(ns).Update(job) } -func deleteV1Job(c *client.Client, ns, name string) error { +func deleteV1Job(c clientset.Interface, ns, name string) error { return c.Batch().Jobs(ns).Delete(name, api.NewDeleteOptions(0)) } // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. -func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int32) error { +func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName})) return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } @@ -311,7 +311,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i } // Wait for job to reach completions. -func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32) error { +func waitForV1JobFinish(c clientset.Interface, ns, jobName string, completions int32) error { return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) { curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { @@ -322,7 +322,7 @@ func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32) } // Wait for job fail. -func waitForV1JobFail(c *client.Client, ns, jobName string, timeout time.Duration) error { +func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { diff --git a/test/e2e/cadvisor.go b/test/e2e/cadvisor.go index dc8dffec2ef..27876a6314e 100644 --- a/test/e2e/cadvisor.go +++ b/test/e2e/cadvisor.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -32,14 +32,14 @@ var _ = framework.KubeDescribe("Cadvisor", func() { f := framework.NewDefaultFramework("cadvisor") It("should be healthy on every node.", func() { - CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute) + CheckCadvisorHealthOnAllNodes(f.ClientSet, 5*time.Minute) }) }) -func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { +func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) { // It should be OK to list unschedulable Nodes here. 
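The job helpers keep their shape and simply reach Jobs through the Batch() group accessor. A sketch of the completion wait built around the same c.Batch().Jobs(ns).Get call; the poll interval, timeout, and the Status.Succeeded condition are assumptions standing in for the framework's constants and the full upstream check:

package example

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForJobCompletions polls the typed batch client until the job reports
// the requested number of successful completions.
func waitForJobCompletions(c clientset.Interface, ns, name string, completions int32) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		job, err := c.Batch().Jobs(ns).Get(name)
		if err != nil {
			return false, err
		}
		return job.Status.Succeeded == completions, nil
	})
}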
By("getting list of nodes") - nodeList, err := c.Nodes().List(api.ListOptions{}) + nodeList, err := c.Core().Nodes().List(api.ListOptions{}) framework.ExpectNoError(err) var errors []error @@ -69,7 +69,7 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { // Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally. statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource)) - _, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() + _, err = c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() if err != nil { errors = append(errors, err) } diff --git a/test/e2e/cluster_logging_es.go b/test/e2e/cluster_logging_es.go index 8cdd85906d8..b1b8c5f0185 100644 --- a/test/e2e/cluster_logging_es.go +++ b/test/e2e/cluster_logging_es.go @@ -53,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu By("Running synthetic logger") createSynthLogger(f, expectedLinesCount) defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{}) - err = framework.WaitForPodSuccessInNamespace(f.Client, synthLoggerPodName, f.Namespace.Name) + err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name) framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName)) By("Waiting for logs to ingest") @@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu func checkElasticsearchReadiness(f *framework.Framework) error { // Check for the existence of the Elasticsearch service. By("Checking the Elasticsearch service exists.") - s := f.Client.Services(api.NamespaceSystem) + s := f.ClientSet.Core().Services(api.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. 
var err error @@ -102,10 +102,10 @@ func checkElasticsearchReadiness(f *framework.Framework) error { By("Checking to make sure the Elasticsearch pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"})) options := api.ListOptions{LabelSelector: label} - pods, err := f.Client.Pods(api.NamespaceSystem).List(options) + pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - err = framework.WaitForPodRunningInNamespace(f.Client, &pod) + err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) Expect(err).NotTo(HaveOccurred()) } @@ -115,7 +115,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error { err = nil var body []byte for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) if errProxy != nil { framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -147,7 +147,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error { By("Checking health of Elasticsearch service.") healthy := false for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) if errProxy != nil { framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -189,7 +189,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error { } func getMissingLinesCountElasticsearch(f *framework.Framework, expectedCount int) (int, error) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) if errProxy != nil { return 0, fmt.Errorf("Failed to get services proxy request: %v", errProxy) } diff --git a/test/e2e/cluster_logging_gcl.go b/test/e2e/cluster_logging_gcl.go index 3215c67b013..3e9c01dd4ea 100644 --- a/test/e2e/cluster_logging_gcl.go +++ b/test/e2e/cluster_logging_gcl.go @@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Flaky]", func() By("Running synthetic logger") createSynthLogger(f, expectedLinesCount) defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{}) - err := framework.WaitForPodSuccessInNamespace(f.Client, synthLoggerPodName, f.Namespace.Name) + err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name) framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName)) By("Waiting for logs to ingest") diff --git a/test/e2e/cluster_logging_utils.go b/test/e2e/cluster_logging_utils.go index ce1f354b8a0..2dfa0205255 100644 --- a/test/e2e/cluster_logging_utils.go +++ b/test/e2e/cluster_logging_utils.go @@ -73,12 +73,12 @@ func reportLogsFromFluentdPod(f *framework.Framework) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"})) options := api.ListOptions{LabelSelector: label} - fluentdPods, err := f.Client.Pods(api.NamespaceSystem).List(options) + fluentdPods, err := 
f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) for _, fluentdPod := range fluentdPods.Items { if fluentdPod.Spec.NodeName == synthLoggerNodeName { containerName := fluentdPod.Spec.Containers[0].Name - logs, err := framework.GetPodLogs(f.Client, api.NamespaceSystem, fluentdPod.Name, containerName) + logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName) if err != nil { return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err) } diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go index 58136082ebf..f48cf0017e8 100644 --- a/test/e2e/cluster_size_autoscaling.go +++ b/test/e2e/cluster_size_autoscaling.go @@ -27,7 +27,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" @@ -50,14 +50,14 @@ const ( var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { f := framework.NewDefaultFramework("autoscaling") - var c *client.Client + var c clientset.Interface var nodeCount int var coresPerNode int var memCapacityMb int var originalSizes map[string]int BeforeEach(func() { - c = f.Client + c = f.ClientSet framework.SkipUnlessProviderIs("gce", "gke") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) @@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { By("Creating unschedulable pod") ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation") By("Waiting for scale up hoping it won't happen") // Verfiy, that the appropreate event was generated. @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { By("Waiting for NotTriggerScaleUp event") - events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{}) + events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(api.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -119,16 +119,16 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { } Expect(eventFound).Should(Equal(true)) // Verify, that cluster size is not changed. 
- framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size <= nodeCount }, time.Second)) }) It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() { ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify, that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) @@ -144,10 +144,10 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify, that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) @@ -166,9 +166,9 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() { CreateHostPortPods(f, "host-port", nodeCount+2, false) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port") + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "host-port") - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) @@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { By("Waiting for new node to appear and annotating it") WaitForGroupSize(minMig, int32(minSize+1)) // Verify, that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) newNodes, err := GetGroupNodes(minMig) @@ -214,11 +214,11 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { By(fmt.Sprintf("Setting labels for new nodes: %v", newNodesSet.List())) updateNodeLabels(c, newNodesSet, labels, nil) - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "node-selector")) + framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "node-selector")) }) It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() { @@ -233,7 +233,7 @@ 
var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool") ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation") // Apparently GKE master is restarted couple minutes after the node pool is added // reseting all the timers in scale down code. Adding 5 extra minutes to workaround @@ -251,11 +251,11 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { increasedSize += val + 2 } setMigSizes(newSizes) - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= increasedSize }, scaleUpTimeout)) By("Some node should be removed") - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size < increasedSize }, scaleDownTimeout)) }) @@ -270,14 +270,14 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { increasedSize += val + 2 } setMigSizes(newSizes) - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= increasedSize }, scaleUpTimeout)) const extraPoolName = "extra-pool" addNodePool(extraPoolName, "n1-standard-1", 3) defer deleteNodePool(extraPoolName) - framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout)) By("Some node should be removed") @@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() { // reseting all the timers in scale down code. Adding 10 extra minutes to workaround // this issue. // TODO: Remove the extra time when GKE restart is fixed. 
- framework.ExpectNoError(WaitForClusterSizeFunc(f.Client, + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute)) }) }) @@ -458,11 +458,11 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod By(fmt.Sprintf("Running RC which reserves host port and defines node selector")) config := &testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: "node-selector", Namespace: f.Namespace.Name, Timeout: defaultTimeout, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: replicas, HostPorts: map[string]int{"port1": 4321}, NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"}, @@ -476,11 +476,11 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) { By(fmt.Sprintf("Running RC which reserves host port")) config := &testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: replicas, HostPorts: map[string]int{"port1": 4321}, } @@ -494,11 +494,11 @@ func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) { By(fmt.Sprintf("Running RC which reserves %v millicores", millicores)) request := int64(millicores / replicas) config := &testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: replicas, CpuRequest: request, } @@ -509,11 +509,11 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes)) request := int64(1024 * 1024 * megabytes / replicas) config := &testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: id, Namespace: f.Namespace.Name, Timeout: defaultTimeout, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: replicas, MemRequest: request, } @@ -524,9 +524,9 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e } // WaitForClusterSize waits until the cluster size matches the given function. 
-func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error { +func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) if err != nil { @@ -550,10 +550,10 @@ func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout t return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout) } -func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c *client.Client) error { +func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { - pods, err := c.Pods(f.Namespace.Name).List(api.ListOptions{}) + pods, err := c.Core().Pods(f.Namespace.Name).List(api.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index 8b84688d9aa..8fd43d5b51a 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/chaosmonkey" "k8s.io/kubernetes/test/e2e/framework" @@ -44,7 +43,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() { v, err := realVersion(framework.TestContext.UpgradeTarget) framework.ExpectNoError(err) framework.ExpectNoError(framework.MasterUpgrade(v)) - framework.ExpectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.ClientSet, v)) }) cm.Register(func(sem *chaosmonkey.Semaphore) { // Close over f. 
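WaitForClusterSizeFunc now lists nodes through Core() while keeping the field selector that excludes unschedulable nodes. A sketch of just that list call, separated from the polling loop (helper name is illustrative):

package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
)

// listSchedulableNodes returns only nodes whose spec.unschedulable is false,
// matching the selector used in the autoscaling test above.
func listSchedulableNodes(c clientset.Interface) (*api.NodeList, error) {
	selector := fields.Set{"spec.unschedulable": "false"}.AsSelector()
	return c.Core().Nodes().List(api.ListOptions{FieldSelector: selector})
}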
@@ -90,7 +89,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() { v, err := realVersion(framework.TestContext.UpgradeTarget) framework.ExpectNoError(err) framework.ExpectNoError(framework.MasterUpgrade(v)) - framework.ExpectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.ClientSet, v)) framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, v)) }) @@ -106,7 +105,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() { v, err := realVersion(framework.TestContext.UpgradeTarget) framework.ExpectNoError(err) framework.ExpectNoError(framework.MasterUpgrade(v)) - framework.ExpectNoError(checkMasterVersion(f.Client, v)) + framework.ExpectNoError(checkMasterVersion(f.ClientSet, v)) framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage)) framework.ExpectNoError(checkNodesVersions(f.ClientSet, v)) }) @@ -147,7 +146,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD // Setup serviceName := "service-test" - jig := NewServiceTestJig(f.Client, f.ClientSet, serviceName) + jig := NewServiceTestJig(f.ClientSet, serviceName) // nodeIP := pickNodeIP(jig.Client) // for later By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name) @@ -192,7 +191,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) } -func checkMasterVersion(c *client.Client, want string) error { +func checkMasterVersion(c clientset.Interface, want string) error { framework.Logf("Checking master version") v, err := c.Discovery().ServerVersion() if err != nil { diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index 2bfe68cd726..91f88277db8 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() { By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() { f.PodClient().CreateSync(pod) pollLogs := func() (string, error) { - return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) } Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) @@ -141,7 +141,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() { By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.Client.ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap) Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) By("waiting to observe update in volume") @@ -153,7 +153,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() { configMap := newConfigMap(f, name) By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, 
err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() { By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -288,7 +288,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -367,7 +367,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 292f1da5748..f78629d0a48 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -331,7 +331,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int // Wait until the pod is not pending. (Here we need to check for something other than // 'Pending' other than checking for 'Running', since when failures occur, we go to // 'Terminated' which can cause indefinite blocking.) 
- framework.ExpectNoError(framework.WaitForPodNotPending(f.Client, ns, pod.Name, pod.ResourceVersion), + framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion), fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) framework.Logf("Started pod %s in namespace %s", pod.Name, ns) diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go index b3f1824a325..2f4e56538dd 100644 --- a/test/e2e/common/downwardapi_volume.go +++ b/test/e2e/common/downwardapi_volume.go @@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() { podClient.CreateSync(pod) Eventually(func() (string, error) { - return framework.GetPodLogs(f.Client, f.Namespace.Name, podName, containerName) + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n")) @@ -103,7 +103,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() { }) Eventually(func() (string, error) { - return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n")) }) @@ -122,7 +122,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() { Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name) Eventually(func() (string, error) { - return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n")) @@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() { }) Eventually(func() (string, error) { - return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName) + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n")) }) diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go index d6aaaaabf12..e849e5f5b61 100644 --- a/test/e2e/common/init_container.go +++ b/test/e2e/common/init_container.go @@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { Containers: []api.Container{ { Name: "run1", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Resources: api.ResourceRequirements{ Limits: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), @@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { Containers: []api.Container{ { Name: "run1", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Resources: api.ResourceRequirements{ Limits: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 94007412bba..36ba4229eaf 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("Pods", func() { Containers: []api.Container{ { Name: "test", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), }, }, }, @@ -211,7 +211,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("verifying the kubelet observed the termination notice") 
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { - podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName) + podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName) if err != nil { framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) return false, nil @@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, }, } - _, err := f.Client.Services(f.Namespace.Name).Create(svc) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc) Expect(err).NotTo(HaveOccurred(), "failed to create service") // Make a client pod that verifies that it has the service environment variables. @@ -460,7 +460,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") pod = podClient.CreateSync(pod) - req := f.Client.Get(). + req := f.ClientSet.Core().RESTClient().Get(). Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). @@ -530,7 +530,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") podClient.CreateSync(pod) - req := f.Client.Get(). + req := f.ClientSet.Core().RESTClient().Get(). Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index d979df8bf18..7781bba4c68 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Secrets", func() { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("Secrets", func() { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -190,7 +190,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32) { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -254,7 +254,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index 49c0fe79029..5aa089f989f 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Sysctls", func() { waitForPodErrorEventOrStarted := func(pod *api.Pod) (*api.Event, error) { var ev *api.Event err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { - evnts, err := f.Client.Events(pod.Namespace).Search(pod) + evnts, err := 
f.ClientSet.Core().Events(pod.Namespace).Search(pod) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } @@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Sysctls", func() { Expect(pod.Status.Phase).To(Equal(api.PodSucceeded)) By("Getting logs from the pod") - log, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) Expect(err).NotTo(HaveOccurred()) By("Checking that the sysctl is actually updated") @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Sysctls", func() { Expect(pod.Status.Phase).To(Equal(api.PodSucceeded)) By("Getting logs from the pod") - log, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) Expect(err).NotTo(HaveOccurred()) By("Checking that the sysctl is actually updated") @@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("Sysctls", func() { }) By("Creating a pod with one valid and two invalid sysctls") - client := f.Client.Pods(f.Namespace.Name) + client := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := client.Create(pod) Expect(err).NotTo(BeNil()) diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go index db6a32ea736..728d8229f3f 100644 --- a/test/e2e/daemon_restart.go +++ b/test/e2e/daemon_restart.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/runtime" @@ -169,9 +169,9 @@ func replacePods(pods []*api.Pod, store cache.Store) { // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // and a list of nodenames across which these containers restarted. -func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) { +func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { options := api.ListOptions{LabelSelector: labelSelector} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) framework.ExpectNoError(err) failedContainers := 0 containerRestartNodes := sets.NewString() @@ -205,10 +205,10 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { // All the restart tests need an rc and a watch on pods of the rc. // Additionally some of them might scale the rc during the test. 
config = testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: rcName, Namespace: ns, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: numPods, CreatedPods: &[]*api.Pod{}, } @@ -221,11 +221,12 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector - return f.Client.Pods(ns).List(options) + obj, err := f.ClientSet.Core().Pods(ns).List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector - return f.Client.Pods(ns).Watch(options) + return f.ClientSet.Core().Pods(ns).Watch(options) }, }, &api.Pod{}, @@ -262,7 +263,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods, true) + framework.ScaleRC(f.ClientSet, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -293,16 +294,16 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { restarter.kill() // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. - framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, false)) restarter.waitUp() - framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, true)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, true)) }) It("Kubelet should not restart containers across restart", func() { nodeIPs, err := getNodePublicIps(f.ClientSet) framework.ExpectNoError(err) - preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector) + preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if preRestarts != 0 { framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes) } @@ -311,9 +312,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout) restarter.restart() } - postRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector) + postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if postRestarts != preRestarts { - framework.DumpNodeDebugInfo(f.Client, badNodes, framework.Logf) + framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index fbfc0d52985..aa0f59aad32 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client 
"k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -59,17 +58,17 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { var f *framework.Framework AfterEach(func() { - if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil { + if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil { framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods)) } else { framework.Logf("unable to dump pods: %v", err) } - err := clearDaemonSetNodeLabels(f.Client, f.ClientSet) + err := clearDaemonSetNodeLabels(f.ClientSet) Expect(err).NotTo(HaveOccurred()) }) @@ -79,12 +78,13 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { dsName := "daemon-set" var ns string - var c *client.Client + var c clientset.Interface BeforeEach(func() { ns = f.Namespace.Name - c = f.Client - err := clearDaemonSetNodeLabels(c, f.ClientSet) + + c = f.ClientSet + err := clearDaemonSetNodeLabels(c) Expect(err).NotTo(HaveOccurred()) }) @@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating simple daemon set %s", dsName) - _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ + _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: dsName, }, @@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred()) By("Stop a daemon pod, check that the daemon pod is revived.") - podClient := c.Pods(ns) + podClient := c.Core().Pods(ns) selector := labels.Set(label).AsSelector() options := api.ListOptions{LabelSelector: selector} @@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} framework.Logf("Creating daemon with a node selector %s", dsName) - _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ + _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: dsName, }, @@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes") By("We should now be able to delete the daemon set.") - Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred()) + Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred()) }) @@ -219,7 +219,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { }] }}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]), } - _, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{ + _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: dsName, }, @@ -267,7 +267,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() { NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes") By("We 
should now be able to delete the daemon set.") - Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred()) + Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred()) }) }) @@ -285,8 +285,8 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m return daemonSetLabels, otherLabels } -func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error { - nodeList := framework.GetReadySchedulableNodesOrDie(cs) +func clearDaemonSetNodeLabels(c clientset.Interface) error { + nodeList := framework.GetReadySchedulableNodesOrDie(c) for _, node := range nodeList.Items { _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) if err != nil { @@ -296,8 +296,8 @@ func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error { return nil } -func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string]string) (*api.Node, error) { - nodeClient := c.Nodes() +func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*api.Node, error) { + nodeClient := c.Core().Nodes() var newNode *api.Node var newLabels map[string]string err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { @@ -340,7 +340,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n return func() (bool, error) { selector := labels.Set(selector).AsSelector() options := api.ListOptions{LabelSelector: selector} - podList, err := f.Client.Pods(f.Namespace.Name).List(options) + podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options) if err != nil { return false, nil } @@ -368,7 +368,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) { return func() (bool, error) { - nodeList, err := f.Client.Nodes().List(api.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { @@ -383,7 +383,7 @@ func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) f } func checkDaemonStatus(f *framework.Framework, dsName string) error { - ds, err := f.Client.DaemonSets(f.Namespace.Name).Get(dsName) + ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName) if err != nil { return fmt.Errorf("Could not get daemon set from api.") } diff --git a/test/e2e/dashboard.go b/test/e2e/dashboard.go index 71b0f0da4e3..d7125c4441a 100644 --- a/test/e2e/dashboard.go +++ b/test/e2e/dashboard.go @@ -43,18 +43,18 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() { It("should check that the kubernetes-dashboard instance is alive", func() { By("Checking whether the kubernetes-dashboard service exists.") - err := framework.WaitForService(f.Client, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout) + err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout) Expect(err).NotTo(HaveOccurred()) By("Checking to make sure the kubernetes-dashboard pods are running") selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName})) - err = testutils.WaitForPodsWithLabelRunning(f.Client, uiNamespace, selector) + err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector) Expect(err).NotTo(HaveOccurred()) By("Checking to make sure we get a 
response from the kubernetes-dashboard.") err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) { var status int - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) if errProxy != nil { framework.Logf("Get services proxy request failed: %v", errProxy) } @@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() { By("Checking that the ApiServer /ui endpoint redirects to a valid server.") var status int - err = f.Client.Get(). + err = f.ClientSet.Core().RESTClient().Get(). AbsPath("/ui"). Timeout(framework.SingleCallTimeout). Do(). diff --git a/test/e2e/density.go b/test/e2e/density.go index 2df18619237..be26d3f777d 100644 --- a/test/e2e/density.go +++ b/test/e2e/density.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -55,7 +54,6 @@ var MaxContainerFailures = 0 type DensityTestConfig struct { Configs []testutils.RCConfig - Client *client.Client ClientSet internalclientset.Interface Namespace string PollInterval time.Duration @@ -162,7 +160,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC return constraints } -func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) { +func logPodStartupStatus(c internalclientset.Interface, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) { label := labels.SelectorFromSet(labels.Set(observedLabels)) podStore := testutils.NewPodStore(c, ns, label, fields.Everything()) defer podStore.Stop() @@ -194,10 +192,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return dtc.Client.Events(dtc.Namespace).List(options) + return dtc.ClientSet.Core().Events(dtc.Namespace).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return dtc.Client.Events(dtc.Namespace).Watch(options) + return dtc.ClientSet.Core().Events(dtc.Namespace).Watch(options) }, }, &api.Event{}, @@ -222,11 +220,11 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = label - return dtc.Client.Pods(dtc.Namespace).List(options) + return dtc.ClientSet.Core().Pods(dtc.Namespace).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = label - return dtc.Client.Pods(dtc.Namespace).Watch(options) + return dtc.ClientSet.Core().Pods(dtc.Namespace).Watch(options) }, }, &api.Pod{}, @@ -254,7 +252,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { }() } logStopCh := make(chan struct{}) - go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh) + go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh) wg.Wait() startupTime := time.Now().Sub(startTime) close(logStopCh) @@ 
-296,7 +294,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount))))) // Print some data about Pod to Node allocation By("Printing Pod to Node allocation data") - podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{}) + podList, err := dtc.ClientSet.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) framework.ExpectNoError(err) pausePodAllocation := make(map[string]int) systemPodAllocation := make(map[string][]string) @@ -324,15 +322,15 @@ func cleanupDensityTest(dtc DensityTestConfig) { // We explicitly delete all pods to have API calls necessary for deletion accounted in metrics. for i := range dtc.Configs { rcName := dtc.Configs[i].Name - rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName) + rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Namespace).Get(rcName) if err == nil && rc.Spec.Replicas != 0 { if framework.TestContext.GarbageCollectorEnabled { By("Cleaning up only the replication controller, garbage collector will clean up the pods") - err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName) + err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Namespace, rcName) framework.ExpectNoError(err) } else { By("Cleaning up the replication controller and pods") - err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName) + err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.Namespace, rcName) framework.ExpectNoError(err) } } @@ -347,7 +345,7 @@ func cleanupDensityTest(dtc DensityTestConfig) { // results will not be representative for control-plane performance as we'll start hitting // limits on Docker's concurrent container startup. var _ = framework.KubeDescribe("Density", func() { - var c *client.Client + var c internalclientset.Interface var nodeCount int var RCName string var additionalPodsPrefix string @@ -392,7 +390,7 @@ var _ = framework.KubeDescribe("Density", func() { f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name // In large clusters we may get to this point but still have a bunch @@ -484,7 +482,7 @@ var _ = framework.KubeDescribe("Density", func() { for i := 0; i < numberOrRCs; i++ { RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid RCConfigs[i] = testutils.RCConfig{Client: c, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Name: RCName, Namespace: ns, Labels: map[string]string{"type": "densityPod"}, @@ -499,7 +497,6 @@ var _ = framework.KubeDescribe("Density", func() { } dConfig := DensityTestConfig{ - Client: c, ClientSet: f.ClientSet, Configs: RCConfigs, PodCount: totalPods, @@ -550,11 +547,12 @@ var _ = framework.KubeDescribe("Density", func() { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) - return c.Pods(ns).List(options) + obj, err := c.Core().Pods(ns).List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) - return c.Pods(ns).Watch(options) + return c.Core().Pods(ns).Watch(options) }, }, &api.Pod{}, @@ -593,7 +591,7 @@ var _ = framework.KubeDescribe("Density", func() { } for i := 1; i <= nodeCount; i++ { name := additionalPodsPrefix + "-" + strconv.Itoa(i) 
- go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest) + go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest) time.Sleep(200 * time.Millisecond) } wg.Wait() @@ -623,7 +621,7 @@ var _ = framework.KubeDescribe("Density", func() { "source": api.DefaultSchedulerName, }.AsSelector() options := api.ListOptions{FieldSelector: selector} - schedEvents, err := c.Events(ns).List(options) + schedEvents, err := c.Core().Events(ns).List(options) framework.ExpectNoError(err) for k := range createTimes { for _, event := range schedEvents.Items { @@ -707,7 +705,7 @@ var _ = framework.KubeDescribe("Density", func() { } RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid RCConfigs[i] = testutils.RCConfig{Client: c, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Name: RCName, Namespace: ns, Labels: map[string]string{"type": "densityPod"}, @@ -719,7 +717,6 @@ var _ = framework.KubeDescribe("Density", func() { } } dConfig := DensityTestConfig{ - Client: c, ClientSet: f.ClientSet, Configs: RCConfigs, PodCount: totalPods, @@ -732,7 +729,7 @@ var _ = framework.KubeDescribe("Density", func() { }) }) -func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) { +func createRunningPodFromRC(wg *sync.WaitGroup, c internalclientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) { defer GinkgoRecover() defer wg.Done() labels := map[string]string{ @@ -769,7 +766,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, imag }, }, } - _, err := c.ReplicationControllers(ns).Create(rc) + _, err := c.Core().ReplicationControllers(ns).Create(rc) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name)) framework.Logf("Found pod '%s' running", name) diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index 82c6307470c..5150142a14c 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -320,9 +320,6 @@ func testDeleteDeployment(f *framework.Framework) { func testRollingUpdateDeployment(f *framework.Framework) { ns := f.Namespace.Name - // TODO: remove unversionedClient when the refactoring is done. Currently some - // functions like verifyPod still expects a unversioned#Client. - unversionedClient := f.Client c := f.ClientSet // Create nginx pods. deploymentPodLabels := map[string]string{"name": "sample-pod"} @@ -336,7 +333,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, "sample-pod", false, 3) + err = framework.VerifyPods(c, ns, "sample-pod", false, 3) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -369,9 +366,6 @@ func testRollingUpdateDeployment(f *framework.Framework) { func testRollingUpdateDeploymentEvents(f *framework.Framework) { ns := f.Namespace.Name - // TODO: remove unversionedClient when the refactoring is done. Currently some - // functions like verifyPod still expects a unversioned#Client. 
- unversionedClient := f.Client c := f.ClientSet // Create nginx pods. deploymentPodLabels := map[string]string{"name": "sample-pod-2"} @@ -391,7 +385,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(rs) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, "sample-pod-2", false, 1) + err = framework.VerifyPods(c, ns, "sample-pod-2", false, 1) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -412,7 +406,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) { // Verify that the pods were scaled up and down as expected. We use events to verify that. deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) - framework.WaitForEvents(unversionedClient, ns, deployment, 2) + framework.WaitForEvents(c, ns, deployment, 2) events, err := c.Core().Events(ns).Search(deployment) if err != nil { framework.Logf("error in listing events: %s", err) @@ -430,9 +424,6 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) { func testRecreateDeployment(f *framework.Framework) { ns := f.Namespace.Name - // TODO: remove unversionedClient when the refactoring is done. Currently some - // functions like verifyPod still expects a unversioned#Client. - unversionedClient := f.Client c := f.ClientSet // Create nginx pods. deploymentPodLabels := map[string]string{"name": "sample-pod-3"} @@ -446,7 +437,7 @@ func testRecreateDeployment(f *framework.Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, "sample-pod-3", false, 3) + err = framework.VerifyPods(c, ns, "sample-pod-3", false, 3) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -468,7 +459,7 @@ func testRecreateDeployment(f *framework.Framework) { // Verify that the pods were scaled up and down as expected. We use events to verify that. deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) Expect(err).NotTo(HaveOccurred()) - framework.WaitForEvents(unversionedClient, ns, deployment, 2) + framework.WaitForEvents(c, ns, deployment, 2) events, err := c.Core().Events(ns).Search(deployment) if err != nil { framework.Logf("error in listing events: %s", err) @@ -486,7 +477,6 @@ func testRecreateDeployment(f *framework.Framework) { // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy func testDeploymentCleanUpPolicy(f *framework.Framework) { ns := f.Namespace.Name - unversionedClient := f.Client c := f.ClientSet // Create nginx pods. deploymentPodLabels := map[string]string{"name": "cleanup-pod"} @@ -501,7 +491,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, "cleanup-pod", false, 1) + err = framework.VerifyPods(c, ns, "cleanup-pod", false, 1) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -558,9 +548,6 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { // i.e. 
we can change desired state and kick off rolling update, then change desired state again before it finishes. func testRolloverDeployment(f *framework.Framework) { ns := f.Namespace.Name - // TODO: remove unversionedClient when the refactoring is done. Currently some - // functions like verifyPod still expects a unversioned#Client. - unversionedClient := f.Client c := f.ClientSet podName := "rollover-pod" deploymentPodLabels := map[string]string{"name": podName} @@ -574,7 +561,7 @@ func testRolloverDeployment(f *framework.Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, podName, false, rsReplicas) + err = framework.VerifyPods(c, ns, podName, false, rsReplicas) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -962,9 +949,6 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) { func testDeploymentLabelAdopted(f *framework.Framework) { ns := f.Namespace.Name - // TODO: remove unversionedClient when the refactoring is done. Currently some - // functions like verifyPod still expects a unversioned#Client. - unversionedClient := f.Client c := f.ClientSet // Create nginx pods. podName := "nginx" @@ -976,7 +960,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) { _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. - err = framework.VerifyPods(unversionedClient, ns, podName, false, 3) + err = framework.VerifyPods(c, ns, podName, false, 3) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) @@ -1097,7 +1081,7 @@ func testScaledRolloutDeployment(f *framework.Framework) { // Verify that the required pods have come up. 
By("Waiting for all required pods to come up") - err = framework.VerifyPods(f.Client, ns, nginxImageName, false, deployment.Spec.Replicas) + err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, deployment.Spec.Replicas) if err != nil { framework.Logf("error in waiting for pods to come up: %s", err) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/dns.go b/test/e2e/dns.go index a4aa7ee1a4e..027ab34ccde 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/wait" @@ -171,23 +171,23 @@ func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePre return probeCmd, fileName } -func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) { +func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface) { assertFilesContain(fileNames, fileDir, pod, client, false, "") } -func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client *client.Client, check bool, expected string) { +func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface, check bool, expected string) { var failed []string framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { failed = []string{} - subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client.Discovery()) if err != nil { return false, err } var contents []byte for _, fileName := range fileNames { if subResourceProxyAvailable { - contents, err = client.Get(). + contents, err = client.Core().RESTClient().Get(). Namespace(pod.Namespace). Resource("pods"). SubResource("proxy"). @@ -195,7 +195,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client Suffix(fileDir, fileName). Do().Raw() } else { - contents, err = client.Get(). + contents, err = client.Core().RESTClient().Get(). Prefix("proxy"). Resource("pods"). Namespace(pod.Namespace). @@ -223,7 +223,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) { By("submitting the pod to kubernetes") - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) defer func() { By("deleting the pod") defer GinkgoRecover() @@ -242,7 +242,7 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string } // Try to find results for each expected name. By("looking for the results for each expected name from probers") - assertFilesExist(fileNames, "results", pod, f.Client) + assertFilesExist(fileNames, "results", pod, f.ClientSet) // TODO: probe from the host, too. 
@@ -252,7 +252,7 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames []string, value string) { By("submitting the pod to kubernetes") - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) defer func() { By("deleting the pod") defer GinkgoRecover() @@ -271,13 +271,13 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames } // Try to find the expected value for each expected name. By("looking for the results for each expected name from probers") - assertFilesContain(fileNames, "results", pod, f.Client, true, value) + assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value) framework.Logf("DNS probes using %s succeeded\n", pod.Name) } func verifyDNSPodIsRunning(f *framework.Framework) { - systemClient := f.Client.Pods(api.NamespaceSystem) + systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem) By("Waiting for DNS Service to be Running") options := api.ListOptions{LabelSelector: dnsServiceLabelSelector} dnsPods, err := systemClient.List(options) @@ -288,7 +288,7 @@ func verifyDNSPodIsRunning(f *framework.Framework) { framework.Failf("No pods match the label selector %v", dnsServiceLabelSelector.String()) } pod := dnsPods.Items[0] - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, &pod)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)) } func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *api.Service { @@ -358,21 +358,21 @@ var _ = framework.KubeDescribe("DNS", func() { "dns-test": "true", } headlessService := createServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.Client.Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test headless service") defer GinkgoRecover() - f.Client.Services(f.Namespace.Name).Delete(headlessService.Name) + f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularService := createServiceSpec("test-service-2", "", false, testServiceSelector) - regularService, err = f.Client.Services(f.Namespace.Name).Create(regularService) + regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test service") defer GinkgoRecover() - f.Client.Services(f.Namespace.Name).Delete(regularService.Name) + f.ClientSet.Core().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() // All the names we need to be able to resolve. 
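One signature change recurs in the deferred cleanups above: the generated clientset's typed Delete takes a second delete-options argument (nil selects the defaults), where the old client's Delete took only the name. A small hypothetical wrapper following the same create-then-defer-delete pattern used for the test services:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// createServiceWithCleanup is illustrative only. It creates a service through
// the Core() accessor and returns a cleanup func that removes it with the
// two-argument Delete, matching the hunks above.
func createServiceWithCleanup(c clientset.Interface, ns string, svc *api.Service) (*api.Service, func(), error) {
	created, err := c.Core().Services(ns).Create(svc)
	if err != nil {
		return nil, nil, err
	}
	cleanup := func() {
		// Errors are ignored here, as in the deferred deletes above.
		c.Core().Services(ns).Delete(created.Name, nil)
	}
	return created, cleanup, nil
}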
@@ -408,12 +408,12 @@ var _ = framework.KubeDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := createServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.Client.Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test headless service") defer GinkgoRecover() - f.Client.Services(f.Namespace.Name).Delete(headlessService.Name) + f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name) @@ -441,12 +441,12 @@ var _ = framework.KubeDescribe("DNS", func() { By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := createServiceSpec(serviceName, "foo.example.com", false, nil) - _, err := f.Client.Services(f.Namespace.Name).Create(externalNameService) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test externalName service") defer GinkgoRecover() - f.Client.Services(f.Namespace.Name).Delete(externalNameService.Name) + f.ClientSet.Core().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name) @@ -463,7 +463,7 @@ var _ = framework.KubeDescribe("DNS", func() { // Test changing the externalName field By("changing the externalName to bar.example.com") - _, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) { + _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) { s.Spec.ExternalName = "bar.example.com" }) Expect(err).NotTo(HaveOccurred()) @@ -480,7 +480,7 @@ var _ = framework.KubeDescribe("DNS", func() { // Test changing type from ExternalName to ClusterIP By("changing the service to type=ClusterIP") - _, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) { + _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) { s.Spec.Type = api.ServiceTypeClusterIP s.Spec.ClusterIP = "127.1.2.3" s.Spec.Ports = []api.ServicePort{ diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index f07c1258285..a76eb725537 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -94,7 +94,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { framework.Failf("Failed to setup provider config: %v", err) } - c, err := framework.LoadClient() + c, err := framework.LoadInternalClientset() if err != nil { glog.Fatal("Error loading client: ", err) } diff --git a/test/e2e/empty.go b/test/e2e/empty.go index 9fecfa5053d..b344625dc5d 100644 --- a/test/e2e/empty.go +++ b/test/e2e/empty.go @@ -28,7 +28,7 @@ var _ = framework.KubeDescribe("[Feature:Empty]", func() { f := framework.NewDefaultFramework("empty") BeforeEach(func() { - c := f.Client + c := f.ClientSet ns := f.Namespace.Name // TODO: respect --allow-notready-nodes flag in those functions. 
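The informer wiring in the daemon_restart.go and density.go hunks earlier changed for a mechanical reason: the typed List call returns (*api.PodList, error), and Go will not return that pair directly where (runtime.Object, error) is expected, so the result is captured in a variable first; Watch already returns watch.Interface and needs no adaptation. A self-contained sketch of that ListWatch construction, with a hypothetical helper name and the watch and runtime packages assumed at their usual paths in this tree:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// podListWatch restates the ListWatch pattern from the hunks above for pods
// matching a label selector in one namespace. The List result is assigned to
// obj so it can be returned as the runtime.Object interface.
func podListWatch(c clientset.Interface, ns string, selector labels.Selector) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.LabelSelector = selector
			obj, err := c.Core().Pods(ns).List(options)
			return runtime.Object(obj), err
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.LabelSelector = selector
			return c.Core().Pods(ns).Watch(options)
		},
	}
}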
diff --git a/test/e2e/empty_dir_wrapper.go b/test/e2e/empty_dir_wrapper.go index 006182a8873..77d57e57794 100644 --- a/test/e2e/empty_dir_wrapper.go +++ b/test/e2e/empty_dir_wrapper.go @@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() { } var err error - if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -124,11 +124,11 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() { defer func() { By("Cleaning up the secret") - if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil { + if err := f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } By("Cleaning up the git vol pod") - if err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { + if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err) } }() @@ -216,17 +216,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle }, } - if gitServerSvc, err = f.Client.Services(f.Namespace.Name).Create(gitServerSvc); err != nil { + if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { By("Cleaning up the git server pod") - if err := f.Client.Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } By("Cleaning up the git server svc") - if err := f.Client.Services(f.Namespace.Name).Delete(gitServerSvc.Name); err != nil { + if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } @@ -266,7 +266,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { "data-1": "value-1", }, } - _, err := f.Client.ConfigMaps(f.Namespace.Name).Create(configMap) + _, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap) framework.ExpectNoError(err) } return @@ -275,7 +275,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { func deleteConfigMaps(f *framework.Framework, configMapNames []string) { By("Cleaning up the configMaps") for _, configMapName := range configMapNames { - err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMapName) + err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName) } } @@ -361,15 +361,15 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum }, }, } - _, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(rc) + _, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc) Expect(err).NotTo(HaveOccurred(), "error creating replication controller") defer 
func() { - err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName) + err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) framework.ExpectNoError(err) }() - pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, rcName, podCount) + pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) By("Ensuring each pod is running") diff --git a/test/e2e/etcd_failure.go b/test/e2e/etcd_failure.go index b574401e93f..f9eba185430 100644 --- a/test/e2e/etcd_failure.go +++ b/test/e2e/etcd_failure.go @@ -42,10 +42,10 @@ var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() { framework.SkipUnlessProviderIs("gce") Expect(framework.RunRC(testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: "baz", Namespace: f.Namespace.Name, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: 1, })).NotTo(HaveOccurred()) }) @@ -101,7 +101,7 @@ func masterExec(cmd string) { func checkExistingRCRecovers(f *framework.Framework) { By("assert that the pre-existing replication controller recovers") - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) rcSelector := labels.Set{"name": "baz"}.AsSelector() By("deleting pods from existing replication controller") diff --git a/test/e2e/events.go b/test/e2e/events.go index 9d087ca4e59..0575799cd76 100644 --- a/test/e2e/events.go +++ b/test/e2e/events.go @@ -37,7 +37,7 @@ var _ = framework.KubeDescribe("Events", func() { It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() { - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) By("creating the pod") name := "send-events-" + string(uuid.NewUUID()) @@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Events", func() { "source": api.DefaultSchedulerName, }.AsSelector() options := api.ListOptions{FieldSelector: selector} - events, err := f.Client.Events(f.Namespace.Name).List(options) + events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) if err != nil { return false, err } @@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Events", func() { "source": "kubelet", }.AsSelector() options := api.ListOptions{FieldSelector: selector} - events, err = f.Client.Events(f.Namespace.Name).List(options) + events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options) if err != nil { return false, err } diff --git a/test/e2e/example_cluster_dns.go b/test/e2e/example_cluster_dns.go index 36f8bcd1f44..f3867b02603 100644 --- a/test/e2e/example_cluster_dns.go +++ b/test/e2e/example_cluster_dns.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/test/e2e/framework" @@ -45,9 +45,9 @@ except: var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { f := framework.NewDefaultFramework("cluster-dns") - var c *client.Client + var c clientset.Interface BeforeEach(func() { - c = f.Client + c = f.ClientSet }) It("should create pod that uses dns", func() { @@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { for _, ns := range namespaces { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) options := api.ListOptions{LabelSelector: label} - pods, err := 
c.Pods(ns.Name).List(options) + pods, err := c.Core().Pods(ns.Name).List(options) Expect(err).NotTo(HaveOccurred()) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") @@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() { // This code is probably unnecessary, but let's stay on the safe side. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName})) options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(namespaces[0].Name).List(options) + pods, err := c.Core().Pods(namespaces[0].Name).List(options) if err != nil || pods == nil || len(pods.Items) == 0 { framework.Failf("no running pods found") diff --git a/test/e2e/example_k8petstore.go b/test/e2e/example_k8petstore.go index 4dba9a9fd86..b112529b462 100644 --- a/test/e2e/example_k8petstore.go +++ b/test/e2e/example_k8petstore.go @@ -25,7 +25,7 @@ import ( "syscall" "time" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -47,8 +47,8 @@ const ( // readTransactions reads # of transactions from the k8petstore web server endpoint. // for more details see the source of the k8petstore web server. -func readTransactions(c *client.Client, ns string) (error, int) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) +func readTransactions(c clientset.Interface, ns string) (error, int) { + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get()) if errProxy != nil { return errProxy, -1 } @@ -66,7 +66,7 @@ func readTransactions(c *client.Client, ns string) (error, int) { // runK8petstore runs the k8petstore application, bound to external nodeport, and // polls until finalTransactionsExpected transactions are acquired, in a maximum of maxSeconds. 
-func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns string, finalTransactionsExpected int, maxTime time.Duration) { +func runK8petstore(restServers int, loadGenerators int, c clientset.Interface, ns string, finalTransactionsExpected int, maxTime time.Duration) { var err error = nil k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh") @@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() { loadGenerators := nodeCount restServers := nodeCount fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers) - runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout) + runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout) }) }) diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 58b339dfb24..c65ffa747a4 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -27,7 +27,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -56,10 +56,10 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) { clusterState(selectorKey, selectorValue).ForEach(fn) } - var c *client.Client + var c clientset.Interface var ns string BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) @@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) err = wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { - podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label}) + podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label}) if err != nil { return false, fmt.Errorf("Unable to get list of pods in petset %s", label) } @@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { err := framework.WaitForPodNameRunningInNamespace(c, podName, ns) Expect(err).NotTo(HaveOccurred()) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - pod, err := c.Pods(ns).Get(podName) + pod, err := c.Core().Pods(ns).Get(podName) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) @@ -504,7 +504,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling rethinkdb") - framework.ScaleRC(c, f.ClientSet, ns, "rethinkdb-rc", 2, true) + framework.ScaleRC(f.ClientSet, ns, "rethinkdb-rc", 2, true) checkDbInstances() By("starting admin") @@ -547,7 +547,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling hazelcast") - framework.ScaleRC(c, f.ClientSet, ns, "hazelcast", 2, true) + framework.ScaleRC(f.ClientSet, ns, "hazelcast", 2, true) forEachPod("name", "hazelcast", func(pod api.Pod) { _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) @@ -556,11 +556,11 @@ var 
_ = framework.KubeDescribe("[Feature:Example]", func() { }) }) -func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) { +func makeHttpRequestToService(c clientset.Interface, ns, service, path string, timeout time.Duration) (string, error) { var result []byte var err error for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get()) if errProxy != nil { break } diff --git a/test/e2e/federated-ingress.go b/test/e2e/federated-ingress.go index 4495a3e89b3..b82bbc51f24 100644 --- a/test/e2e/federated-ingress.go +++ b/test/e2e/federated-ingress.go @@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func( }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms. nsName := f.FederationNamespace.Name ingress := createIngressOrFail(f.FederationClientset_1_5, nsName) @@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func( // register clusters in federation apiserver BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms. if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" { federationName = DefaultFederationName @@ -124,7 +124,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func( ) BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // create backend pod createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName) // create backend service diff --git a/test/e2e/federated-namespace.go b/test/e2e/federated-namespace.go index 8ecad2d897d..1117a0a77eb 100644 --- a/test/e2e/federated-namespace.go +++ b/test/e2e/federated-namespace.go @@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func var clusters map[string]*cluster // All clusters, keyed by cluster name BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // TODO: Federation API server should be able to answer this. 
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" { @@ -56,7 +56,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func }) AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) deleteAllTestNamespaces( f.FederationClientset_1_5.Core().Namespaces().List, f.FederationClientset_1_5.Core().Namespaces().Delete) @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) ns := api_v1.Namespace{ ObjectMeta: api_v1.ObjectMeta{ diff --git a/test/e2e/federated-secret.go b/test/e2e/federated-secret.go index 3142bc83b8a..f5a1b245b5c 100644 --- a/test/e2e/federated-secret.go +++ b/test/e2e/federated-secret.go @@ -46,18 +46,18 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation12]", func Describe("Secret objects", func() { BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) clusters = map[string]*cluster{} registerClusters(clusters, UserAgentName, "", f) }) AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) unregisterClusters(clusters, f) }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name secret := createSecretOrFail(f.FederationClientset_1_5, nsName) defer func() { // Cleanup diff --git a/test/e2e/federated-service.go b/test/e2e/federated-service.go index 3966464f03a..e6656ec6d1b 100644 --- a/test/e2e/federated-service.go +++ b/test/e2e/federated-service.go @@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { var _ = Describe("Federated Services", func() { BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // TODO: Federation API server should be able to answer this. 
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" { @@ -70,12 +70,12 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { ) BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // Placeholder }) AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) if service != nil { By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName)) @@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should succeed", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName = f.FederationNamespace.Name service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName) @@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should create matching services in underlying clusters", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName = f.FederationNamespace.Name service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName) @@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { ) BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name createBackendPodsOrFail(clusters, nsName, FederatedServicePodName) @@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name deleteBackendPodsOrFail(clusters, nsName) @@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should be able to discover a federated service", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name svcDNSNames := []string{ @@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { Context("non-local federated service", func() { BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // Delete all the backend pods from the shard which is local to the discovery pod. deleteOneBackendPodOrFail(clusters[primaryClusterName]) @@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should be able to discover a non-local federated service", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name svcDNSNames := []string{ @@ -190,7 +190,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { // TTL and/or running the pods in parallel. 
Context("[Slow] missing local service", func() { It("should never find DNS entries for a missing local service", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name localSvcDNSNames := []string{ diff --git a/test/e2e/federation-apiserver.go b/test/e2e/federation-apiserver.go index c1ce4ad6706..c57f69b291e 100644 --- a/test/e2e/federation-apiserver.go +++ b/test/e2e/federation-apiserver.go @@ -36,7 +36,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func Describe("Cluster objects", func() { AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // Delete registered clusters. // This is if a test failed, it should not affect other tests. @@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) contexts := f.GetUnderlyingFederatedContexts() @@ -85,11 +85,11 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func }) Describe("Admission control", func() { AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) }) It("should not be able to create resources if namespace does not exist", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // Creating a service in a non-existing namespace should fail. svcNamespace := "federation-admission-test-ns" diff --git a/test/e2e/federation-authn.go b/test/e2e/federation-authn.go index e8164ad525e..d6c9bd55e25 100644 --- a/test/e2e/federation-authn.go +++ b/test/e2e/federation-authn.go @@ -34,11 +34,11 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { var _ = Describe("Federation API server authentication", func() { BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) }) It("should accept cluster resources when the client has right authentication credentials", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name svc := createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName) @@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should not accept cluster resources when the client has invalid authentication credentials", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) contexts := f.GetUnderlyingFederatedContexts() @@ -67,7 +67,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() { }) It("should not accept cluster resources when the client has no authentication credentials", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) fcs, err := invalidAuthFederationClientSet(nil) ExpectNoError(err) diff --git a/test/e2e/federation-event.go b/test/e2e/federation-event.go index 16d74f160be..fb09a309109 100644 --- a/test/e2e/federation-event.go +++ b/test/e2e/federation-event.go @@ -37,7 +37,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func() Describe("Event objects", func() { AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name // Delete registered events. 
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func() }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name event := createEventOrFail(f.FederationClientset_1_5, nsName) diff --git a/test/e2e/federation-replicaset.go b/test/e2e/federation-replicaset.go index 58d0d00912e..daf90c6e321 100644 --- a/test/e2e/federation-replicaset.go +++ b/test/e2e/federation-replicaset.go @@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu Describe("ReplicaSet objects", func() { AfterEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) // Delete registered replicasets. nsName := f.FederationNamespace.Name @@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu }) It("should be created and deleted successfully", func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) nsName := f.FederationNamespace.Name replicaset := createReplicaSetOrFail(f.FederationClientset_1_5, nsName) @@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu federationName string ) BeforeEach(func() { - framework.SkipUnlessFederated(f.Client) + framework.SkipUnlessFederated(f.ClientSet) if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" { federationName = DefaultFederationName } diff --git a/test/e2e/federation-util.go b/test/e2e/federation-util.go index ab15fa9f8a4..f51ff4c0e63 100644 --- a/test/e2e/federation-util.go +++ b/test/e2e/federation-util.go @@ -385,7 +385,7 @@ func podExitCodeDetector(f *framework.Framework, name, namespace string, code in } return func() error { - pod, err := f.Client.Pods(namespace).Get(name) + pod, err := f.ClientSet.Core().Pods(namespace).Get(name) if err != nil { return logerr(err) } @@ -394,7 +394,7 @@ func podExitCodeDetector(f *framework.Framework, name, namespace string, code in } // Best effort attempt to grab pod logs for debugging - logs, err = framework.GetPodLogs(f.Client, namespace, name, pod.Spec.Containers[0].Name) + logs, err = framework.GetPodLogs(f.ClientSet, namespace, name, pod.Spec.Containers[0].Name) if err != nil { framework.Logf("Cannot fetch pod logs: %v", err) } @@ -433,12 +433,12 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s nsName := f.FederationNamespace.Name By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, nsName)) - _, err := f.Client.Pods(nsName).Create(pod) + _, err := f.ClientSet.Core().Pods(nsName).Create(pod) framework.ExpectNoError(err, "Trying to create pod to run %q", command) By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName)) defer func() { By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName)) - err := f.Client.Pods(nsName).Delete(podName, api.NewDeleteOptions(0)) + err := f.ClientSet.Core().Pods(nsName).Delete(podName, api.NewDeleteOptions(0)) framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName) By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName)) }() diff --git a/test/e2e/framework/exec_util.go b/test/e2e/framework/exec_util.go index 08e84824687..a6939702e77 100644 --- a/test/e2e/framework/exec_util.go +++ b/test/e2e/framework/exec_util.go @@ -47,7 +47,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, 
containerName var stdout, stderr bytes.Buffer var stdin io.Reader tty := false - req := f.Client.RESTClient.Post(). + req := f.ClientSet.Core().RESTClient().Post(). Resource("pods"). Name(podName). Namespace(f.Namespace.Name). diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index f0c83fa3af8..f9384af1fb4 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -39,7 +39,6 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/typed/dynamic" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/metrics" @@ -61,9 +60,6 @@ const ( type Framework struct { BaseName string - // Client is manually created and should not be used unless absolutely necessary. Use ClientSet_1_5 - // where possible. - Client *client.Client // ClientSet uses internal objects, you should use ClientSet_1_5 where possible. ClientSet internalclientset.Interface @@ -134,12 +130,12 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G return f } -func NewFramework(baseName string, options FrameworkOptions, client *client.Client) *Framework { +func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework { f := &Framework{ BaseName: baseName, AddonResourceConstraints: make(map[string]ResourceConstraint), options: options, - Client: client, + ClientSet: client, } BeforeEach(f.BeforeEach) @@ -185,7 +181,7 @@ func (f *Framework) BeforeEach() { // The fact that we need this feels like a bug in ginkgo. // https://github.com/onsi/ginkgo/issues/222 f.cleanupHandle = AddCleanupAction(f.AfterEach) - if f.Client == nil { + if f.ClientSet == nil { By("Creating a kubernetes client") config, err := LoadConfig() Expect(err).NotTo(HaveOccurred()) @@ -197,9 +193,6 @@ func (f *Framework) BeforeEach() { if TestContext.KubeAPIContentType != "" { config.ContentType = TestContext.KubeAPIContentType } - c, err := loadClientFromConfig(config) - Expect(err).NotTo(HaveOccurred()) - f.Client = c f.ClientSet, err = internalclientset.NewForConfig(config) Expect(err).NotTo(HaveOccurred()) f.ClientSet_1_5, err = release_1_5.NewForConfig(config) @@ -239,14 +232,14 @@ func (f *Framework) BeforeEach() { if TestContext.VerifyServiceAccount { By("Waiting for a default service account to be provisioned in namespace") - err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) + err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) Expect(err).NotTo(HaveOccurred()) } else { Logf("Skipping waiting for service account") } if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" { - f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{ + f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{ inKubemark: ProviderIs("kubemark"), masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master", }) @@ -261,7 +254,7 @@ func (f *Framework) BeforeEach() { f.logsSizeWaitGroup = sync.WaitGroup{} f.logsSizeWaitGroup.Add(1) f.logsSizeCloseChannel = make(chan bool) - f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel) + f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel) go func() { f.logsSizeVerifier.Run() f.logsSizeWaitGroup.Done() @@ -326,7 +319,7 @@ func (f 
*Framework) AfterEach() { if f.NamespaceDeletionTimeout != 0 { timeout = f.NamespaceDeletionTimeout } - if err := deleteNS(f.Client, f.ClientPool, ns.Name, timeout); err != nil { + if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil { if !apierrs.IsNotFound(err) { nsDeletionErrors[ns.Name] = err } else { @@ -348,7 +341,7 @@ func (f *Framework) AfterEach() { // Paranoia-- prevent reuse! f.Namespace = nil f.FederationNamespace = nil - f.Client = nil + f.ClientSet = nil f.namespacesToDelete = nil // if we had errors deleting, report them now. @@ -376,18 +369,18 @@ func (f *Framework) AfterEach() { // Print events if the test failed. if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure { // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client. - DumpAllNamespaceInfo(f.Client, f.ClientSet_1_5, f.Namespace.Name) + DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name) By(fmt.Sprintf("Dumping a list of prepulled images on each node")) - LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) + LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) if f.federated { // Dump federation events in federation namespace. DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { return f.FederationClientset_1_5.Core().Events(ns).List(opts) }, f.FederationNamespace.Name) // Print logs of federation control plane pods (federation-apiserver and federation-controller-manager) - LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"}, Logf) + LogPodsWithLabels(f.ClientSet, "federation", map[string]string{"app": "federated-cluster"}, Logf) // Print logs of kube-dns pod - LogPodsWithLabels(f.Client, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf) + LogPodsWithLabels(f.ClientSet, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf) } } @@ -407,7 +400,7 @@ func (f *Framework) AfterEach() { if TestContext.GatherMetricsAfterTest { By("Gathering metrics") // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered. - grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true) + grabber, err := metrics.NewMetricsGrabber(f.ClientSet, true, false, false, true) if err != nil { Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { @@ -441,7 +434,7 @@ func (f *Framework) AfterEach() { // Check whether all nodes are ready after the test. // This is explicitly done at the very end of the test, to avoid // e.g. not removing namespace in case of this failure. - if err := AllNodesReady(f.Client, 3*time.Minute); err != nil { + if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil { Failf("All nodes should be ready after test, %v", err) } } @@ -451,7 +444,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) ( if createTestingNS == nil { createTestingNS = CreateTestingNS } - ns, err := createTestingNS(baseName, f.Client, labels) + ns, err := createTestingNS(baseName, f.ClientSet, labels) if err == nil { f.namespacesToDelete = append(f.namespacesToDelete, ns) } @@ -483,29 +476,29 @@ func (f *Framework) createFederationNamespace(baseName string) (*v1.Namespace, e // WaitForPodTerminated waits for the pod to be terminated with the given reason. 
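With the Client field removed from Framework, tests and helpers in this file reach the API exclusively through f.ClientSet and its Core() group. A minimal sketch of a helper written directly against the new interface (the package and function names are illustrative):

package e2esketch

import (
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// podPhase fetches a single pod via the Core() group and returns its phase;
// this is the replacement for the old c.Pods(ns).Get(name) call on *client.Client.
func podPhase(c clientset.Interface, ns, name string) (api.PodPhase, error) {
    pod, err := c.Core().Pods(ns).Get(name)
    if err != nil {
        return "", err
    }
    return pod.Status.Phase, nil
}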
func (f *Framework) WaitForPodTerminated(podName, reason string) error { - return waitForPodTerminatedInNamespace(f.Client, podName, reason, f.Namespace.Name) + return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name) } // WaitForPodRunning waits for the pod to run in the namespace. func (f *Framework) WaitForPodRunning(podName string) error { - return WaitForPodNameRunningInNamespace(f.Client, podName, f.Namespace.Name) + return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name) } // WaitForPodReady waits for the pod to flip to ready in the namespace. func (f *Framework) WaitForPodReady(podName string) error { - return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, "", PodStartTimeout) + return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, "", PodStartTimeout) } // WaitForPodRunningSlow waits for the pod to run in the namespace. // It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout). func (f *Framework) WaitForPodRunningSlow(podName string) error { - return waitForPodRunningInNamespaceSlow(f.Client, podName, f.Namespace.Name, "") + return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name, "") } // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either // success or failure. func (f *Framework) WaitForPodNoLongerRunning(podName string) error { - return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name, "") + return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name, "") } // TestContainerOutput runs the given pod in the given namespace and waits @@ -528,7 +521,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error { for { // TODO: Endpoints client should take a field selector so we // don't have to list everything. - list, err := f.Client.Endpoints(f.Namespace.Name).List(api.ListOptions{}) + list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{}) if err != nil { return err } @@ -547,7 +540,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error { FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector(), ResourceVersion: rv, } - w, err := f.Client.Endpoints(f.Namespace.Name).Watch(options) + w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) if err != nil { return err } @@ -613,7 +606,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName) f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count) if block { - err = testutils.WaitForPodsWithLabelRunning(f.Client, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector))) + err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector))) } return err, theService } @@ -641,7 +634,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str } } Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName) - service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{ + service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: "service-for-" + appName, Labels: map[string]string{ @@ -667,7 +660,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n // one per node, but no more than maxCount. 
if i <= maxCount { Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName) - _, err := f.Client.Pods(f.Namespace.Name).Create(&api.Pod{ + _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf(appName+"-pod-%v", i), Labels: labels, @@ -852,14 +845,14 @@ type PodStateVerification struct { } type ClusterVerification struct { - client *client.Client + client internalclientset.Interface namespace *api.Namespace // pointer rather than string, since ns isn't created until before each. podState PodStateVerification } func (f *Framework) NewClusterVerification(filter PodStateVerification) *ClusterVerification { return &ClusterVerification{ - f.Client, + f.ClientSet, f.Namespace, filter, } @@ -894,7 +887,7 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool { } // filterLabels returns a list of pods which have labels. -func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*api.PodList, error) { +func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) { var err error var selector labels.Selector var pl *api.PodList @@ -903,9 +896,9 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (* if len(selectors) > 0 { selector = labels.SelectorFromSet(labels.Set(selectors)) options := api.ListOptions{LabelSelector: selector} - pl, err = cli.Pods(ns).List(options) + pl, err = cli.Core().Pods(ns).List(options) } else { - pl, err = cli.Pods(ns).List(api.ListOptions{}) + pl, err = cli.Core().Pods(ns).List(api.ListOptions{}) } return pl, err } @@ -913,7 +906,7 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (* // filter filters pods which pass a filter. It can be used to compose // the more useful abstractions like ForEach, WaitFor, and so on, which // can be used directly by tests. -func (p *PodStateVerification) filter(c *client.Client, namespace *api.Namespace) ([]api.Pod, error) { +func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) { if len(p.ValidPhases) == 0 || namespace == nil { panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace)) } diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index af2ac930056..8856643e355 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -30,7 +30,7 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/prometheus/common/model" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -63,7 +63,7 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j]. // If a apiserver client is passed in, the function will try to get kubelet metrics from metrics grabber; // or else, the function will try to get kubelet metrics directly from the node. 
-func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) { +func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) { if c == nil { return metrics.GrabKubeletMetricsWithoutProxy(nodeName) } @@ -76,7 +76,7 @@ func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.Kubel // getKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims // the subsystem prefix. -func getKubeletMetrics(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) { +func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) { ms, err := getKubeletMetricsFromNode(c, nodeName) if err != nil { return metrics.KubeletMetrics{}, err @@ -138,7 +138,7 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { // RuntimeOperationMonitor is the tool getting and parsing docker operation metrics. type RuntimeOperationMonitor struct { - client *client.Client + client clientset.Interface nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate } @@ -152,12 +152,12 @@ type RuntimeOperationErrorRate struct { TimeoutRate float64 } -func NewRuntimeOperationMonitor(c *client.Client) *RuntimeOperationMonitor { +func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor { m := &RuntimeOperationMonitor{ client: c, nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), } - nodes, err := m.client.Nodes().List(api.ListOptions{}) + nodes, err := m.client.Core().Nodes().List(api.ListOptions{}) if err != nil { Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) } @@ -224,7 +224,7 @@ func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperation } // getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node. -func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntimeOperationErrorRate, error) { +func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) { result := make(NodeRuntimeOperationErrorRate) ms, err := getKubeletMetrics(c, node) if err != nil { @@ -256,7 +256,7 @@ func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntim } // HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics. -func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) { +func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) { ms, err := getKubeletMetrics(c, nodeName) if err != nil { return KubeletLatencyMetrics{}, err @@ -278,19 +278,19 @@ func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nod // in the returned ContainerInfo is subject to the requirements in statsRequest. // TODO: This function uses the deprecated kubelet stats API; it should be // removed. 
-func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) { +func getContainerInfo(c clientset.Interface, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) { reqBody, err := json.Marshal(req) if err != nil { return nil, err } - subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return nil, err } var data []byte if subResourceProxyAvailable { - data, err = c.Post(). + data, err = c.Core().RESTClient().Post(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). @@ -300,7 +300,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats Do().Raw() } else { - data, err = c.Post(). + data, err = c.Core().RESTClient().Post(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). @@ -344,7 +344,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats // TODO: This function relies on the deprecated kubelet stats API and should be // removed and/or rewritten. func getOneTimeResourceUsageOnNode( - c *client.Client, + c clientset.Interface, nodeName string, cpuInterval time.Duration, containerNames func() []string, @@ -400,15 +400,15 @@ func getOneTimeResourceUsageOnNode( return usageMap, nil } -func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) { - subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) +func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) { + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return nil, err } var data []byte if subResourceProxyAvailable { - data, err = c.Get(). + data, err = c.Core().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). @@ -417,7 +417,7 @@ func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, err Do().Raw() } else { - data, err = c.Get(). + data, err = c.Core().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). 
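The kubelet stats helpers above now build their node-proxy requests from c.Core().RESTClient(). A compact sketch of the GET variant, assuming the sub-resource proxy is available; the "stats/summary" suffix (the kubelet summary endpoint) and the function name are assumptions made for illustration, not part of this patch:

package e2esketch

import (
    "fmt"

    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/master/ports"
)

// kubeletSummaryRaw fetches raw kubelet stats for a node through the
// apiserver's node proxy sub-resource, in the style of getNodeStatsSummary above.
func kubeletSummaryRaw(c clientset.Interface, nodeName string) ([]byte, error) {
    return c.Core().RESTClient().Get().
        Resource("nodes").
        SubResource("proxy").
        Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
        Suffix("stats/summary").
        Do().Raw()
}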
@@ -515,7 +515,7 @@ type usageDataPerContainer struct { memWorkSetData []uint64 } -func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) { +func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) { client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap") if err != nil { return "", err @@ -531,7 +531,7 @@ func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) { return strings.Join(lines[len(lines)-numLines:], "\n"), nil } -func PrintAllKubeletPods(c *client.Client, nodeName string) { +func PrintAllKubeletPods(c clientset.Interface, nodeName string) { podList, err := GetKubeletPods(c, nodeName) if err != nil { Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err) @@ -565,13 +565,13 @@ type resourceCollector struct { lock sync.RWMutex node string containers []string - client *client.Client + client clientset.Interface buffers map[string][]*ContainerResourceUsage pollingInterval time.Duration stopCh chan struct{} } -func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector { +func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector { buffers := make(map[string][]*ContainerResourceUsage) return &resourceCollector{ node: nodeName, @@ -679,13 +679,13 @@ func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]f // ResourceMonitor manages a resourceCollector per node. type ResourceMonitor struct { - client *client.Client + client clientset.Interface containers []string pollingInterval time.Duration collectors map[string]*resourceCollector } -func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *ResourceMonitor { +func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor { return &ResourceMonitor{ containers: containerNames, client: c, @@ -695,7 +695,7 @@ func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterv func (r *ResourceMonitor) Start() { // It should be OK to monitor unschedulable Nodes - nodes, err := r.client.Nodes().List(api.ListOptions{}) + nodes, err := r.client.Core().Nodes().List(api.ListOptions{}) if err != nil { Failf("ResourceMonitor: unable to get list of nodes: %v", err) } diff --git a/test/e2e/framework/log_size_monitoring.go b/test/e2e/framework/log_size_monitoring.go index b23d2bdbac9..4a9da5750c8 100644 --- a/test/e2e/framework/log_size_monitoring.go +++ b/test/e2e/framework/log_size_monitoring.go @@ -26,7 +26,6 @@ import ( "time" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" ) const ( @@ -65,8 +64,7 @@ type LogSizeGatherer struct { // LogsSizeVerifier gathers data about log files sizes from master and node machines. // It oversees a workers which do the gathering. 
type LogsSizeVerifier struct { - client *client.Client - clientset clientset.Interface + client clientset.Interface stopChannel chan bool // data stores LogSizeData groupped per IP and log_path data *LogsSizeData @@ -144,8 +142,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int } // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed -func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier { - nodeAddresses, err := NodeSSHHosts(cs) +func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier { + nodeAddresses, err := NodeSSHHosts(c) ExpectNoError(err) masterAddress := GetMasterHost() + ":22" @@ -154,7 +152,6 @@ func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan verifier := &LogsSizeVerifier{ client: c, - clientset: cs, stopChannel: stopChannel, data: prepareData(masterAddress, nodeAddresses), masterAddress: masterAddress, diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index 8ba7a389aa9..dc832bda1b1 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -28,7 +28,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/util/sets" @@ -205,7 +205,7 @@ func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration) } } -func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) { +func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) { var a APIResponsiveness body, err := getMetrics(c) @@ -247,7 +247,7 @@ func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) { // Prints top five summary metrics for request types with latency and returns // number of such request types above threshold. -func HighLatencyRequests(c *client.Client) (int, error) { +func HighLatencyRequests(c clientset.Interface) (int, error) { metrics, err := readLatencyMetrics(c) if err != nil { return 0, err @@ -297,9 +297,9 @@ func VerifyPodStartupLatency(latency PodStartupLatency) error { } // Resets latency metrics in apiserver. -func ResetMetrics(c *client.Client) error { +func ResetMetrics(c clientset.Interface) error { Logf("Resetting latency metrics in apiserver...") - body, err := c.Delete().AbsPath("/metrics").DoRaw() + body, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw() if err != nil { return err } @@ -310,8 +310,8 @@ func ResetMetrics(c *client.Client) error { } // Retrieves metrics information. -func getMetrics(c *client.Client) (string, error) { - body, err := c.Get().AbsPath("/metrics").DoRaw() +func getMetrics(c clientset.Interface) (string, error) { + body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw() if err != nil { return "", err } @@ -319,11 +319,11 @@ func getMetrics(c *client.Client) (string, error) { } // Retrieves scheduler metrics information. 
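getMetrics and ResetMetrics above show the raw-path access pattern on the new interface: the REST client hanging off Core() is used for anything that is not a typed resource call. A small sketch combining the two, with illustrative package and function names:

package e2esketch

import (
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// scrapeAndResetMetrics reads the apiserver /metrics endpoint and then
// resets it, both via the Core() REST client.
func scrapeAndResetMetrics(c clientset.Interface) (string, error) {
    body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
    if err != nil {
        return "", err
    }
    if _, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw(); err != nil {
        return "", err
    }
    return string(body), nil
}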
-func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) { +func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) { result := SchedulingLatency{} // Check if master Node is registered - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) ExpectNoError(err) var data string @@ -334,7 +334,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) { } } if masterRegistered { - rawData, err := c.Get(). + rawData, err := c.Core().RESTClient().Get(). Prefix("proxy"). Namespace(api.NamespaceSystem). Resource("pods"). @@ -383,7 +383,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) { } // Verifies (currently just by logging them) the scheduling latencies. -func VerifySchedulerLatency(c *client.Client) error { +func VerifySchedulerLatency(c clientset.Interface) error { latency, err := getSchedulingLatency(c) if err != nil { return err @@ -457,7 +457,7 @@ func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric { // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times // If latencyDataLag is nil then it will be populated from latencyData -func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c *client.Client) { +func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) { if latencyDataLag == nil { latencyDataLag = latencyData } diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 67be328b841..fcdb71e22f8 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -27,7 +27,7 @@ import ( api "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" - client "k8s.io/kubernetes/pkg/client/unversioned" + coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/rand" @@ -372,7 +372,7 @@ func (config *NetworkingTestConfig) createNodePortService(selector map[string]st } func (config *NetworkingTestConfig) DeleteNodePortService() { - err := config.getServiceClient().Delete(config.NodePortService.Name) + err := config.getServiceClient().Delete(config.NodePortService.Name, nil) Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err) time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted. 
} @@ -403,7 +403,7 @@ func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api _, err := config.getServiceClient().Create(serviceSpec) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) - err = WaitForService(config.f.Client, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) + err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) createdService, err := config.getServiceClient().Get(serviceSpec.Name) @@ -431,7 +431,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) { config.setupCore(selector) By("Getting node addresses") - ExpectNoError(WaitForAllNodesSchedulable(config.f.Client)) + ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet)) nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet) config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP) if len(config.ExternalAddrs) < 2 { @@ -464,7 +464,7 @@ func (config *NetworkingTestConfig) cleanup() { if err == nil { for _, ns := range nsList.Items { if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace { - nsClient.Delete(ns.Name) + nsClient.Delete(ns.Name, nil) } } } @@ -482,7 +482,7 @@ func shuffleNodes(nodes []api.Node) []api.Node { } func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { - ExpectNoError(WaitForAllNodesSchedulable(config.f.Client)) + ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet)) nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet) // To make this test work reasonably fast in large clusters, @@ -520,12 +520,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() { config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.EndpointPods = config.EndpointPods[1:] // wait for pod being deleted. - err := WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) + err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) if err != nil { Failf("Failed to delete %s pod: %v", pod.Name, err) } // wait for endpoint being removed. 
- err = WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) + err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) if err != nil { Failf("Failed to remove endpoint from service: %s", nodePortServiceName) } @@ -544,10 +544,10 @@ func (config *NetworkingTestConfig) getPodClient() *PodClient { return config.podClient } -func (config *NetworkingTestConfig) getServiceClient() client.ServiceInterface { - return config.f.Client.Services(config.Namespace) +func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface { + return config.f.ClientSet.Core().Services(config.Namespace) } -func (config *NetworkingTestConfig) getNamespacesClient() client.NamespaceInterface { - return config.f.Client.Namespaces() +func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface { + return config.f.ClientSet.Core().Namespaces() } diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 1b706d5926d..7ff5e96ef06 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -23,7 +23,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util/wait" ) @@ -82,7 +82,7 @@ var NodeUpgrade = func(f *Framework, v string, img string) error { // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in // GKE; the operation shouldn't return until they all are. Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout) - if _, err := CheckNodesReady(f.Client, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil { + if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil { return err } return nil @@ -139,7 +139,7 @@ func nodeUpgradeGKE(v string, img string) error { // CheckNodesReady waits up to nt for expect nodes accessed by c to be ready, // returning an error if this doesn't happen in time. It returns the names of // nodes it finds. -func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) { +func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) { // First, keep getting all of the nodes until we get the number we expect. var nodeList *api.NodeList var errLast error @@ -148,7 +148,7 @@ func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // knows about all of the nodes. Thus, we retry the list nodes call // until we get the expected number of nodes. 
- nodeList, errLast = c.Nodes().List(api.ListOptions{ + nodeList, errLast = c.Core().Nodes().List(api.ListOptions{ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()}) if errLast != nil { return false, nil diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index 1bb3f8ec7b8..ce390729a64 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" @@ -43,13 +43,13 @@ var ImageWhiteList sets.String func (f *Framework) PodClient() *PodClient { return &PodClient{ f: f, - PodInterface: f.Client.Pods(f.Namespace.Name), + PodInterface: f.ClientSet.Core().Pods(f.Namespace.Name), } } type PodClient struct { f *Framework - unversioned.PodInterface + unversionedcore.PodInterface } // Create creates a new pod according to the framework specifications (don't wait for it to start). @@ -116,7 +116,7 @@ func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout if err != nil && !errors.IsNotFound(err) { Failf("Failed to delete pod %q: %v", name, err) } - Expect(WaitForPodToDisappear(c.f.Client, c.f.Namespace.Name, name, labels.Everything(), + Expect(WaitForPodToDisappear(c.f.ClientSet, c.f.Namespace.Name, name, labels.Everything(), 2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name) } @@ -156,7 +156,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) { // WaitForSuccess waits for pod to success. func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { f := c.f - Expect(waitForPodCondition(f.Client, f.Namespace.Name, name, "success or failure", timeout, + Expect(waitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, func(pod *api.Pod) (bool, error) { switch pod.Status.Phase { case api.PodFailed: diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go index 45691e5c4ef..16d65443977 100644 --- a/test/e2e/framework/resource_usage_gatherer.go +++ b/test/e2e/framework/resource_usage_gatherer.go @@ -30,7 +30,7 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/system" ) @@ -129,7 +129,7 @@ func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]Resour } type resourceGatherWorker struct { - c *client.Client + c clientset.Interface nodeName string wg *sync.WaitGroup containerIDToNameMap map[string]string @@ -204,7 +204,7 @@ func getKubemarkMasterComponentsResourceUsage() ResourceUsagePerContainer { return result } -func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *client.Client) { +func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c clientset.Interface) { if len(g.workers) == 0 { return } @@ -218,7 +218,7 @@ func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *clie } type containerResourceGatherer struct { - client *client.Client + client clientset.Interface stopCh chan struct{} workers []resourceGatherWorker workerWg sync.WaitGroup @@ -232,7 +232,7 @@ type ResourceGathererOptions struct { masterOnly bool } -func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) (*containerResourceGatherer, error) { +func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) { g := containerResourceGatherer{ client: c, stopCh: make(chan struct{}), @@ -250,7 +250,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) finished: false, }) } else { - pods, err := c.Pods("kube-system").List(api.ListOptions{}) + pods, err := c.Core().Pods("kube-system").List(api.ListOptions{}) if err != nil { Logf("Error while listing Pods: %v", err) return nil, err @@ -262,7 +262,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) g.containerIDs = append(g.containerIDs, containerID) } } - nodeList, err := c.Nodes().List(api.ListOptions{}) + nodeList, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { Logf("Error while listing Nodes: %v", err) return nil, err diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 5a962a308b9..e12a5a606f0 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -183,7 +183,7 @@ var ( ) // GetServerArchitecture fetches the architecture of the cluster's apiserver. -func GetServerArchitecture(c *client.Client) string { +func GetServerArchitecture(c clientset.Interface) string { arch := "" sVer, err := c.Discovery().ServerVersion() if err != nil || sVer.Platform == "" { @@ -199,7 +199,7 @@ func GetServerArchitecture(c *client.Client) string { } // GetPauseImageName fetches the pause image name for the same architecture as the apiserver. 
-func GetPauseImageName(c *client.Client) string { +func GetPauseImageName(c clientset.Interface) string { return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion } @@ -217,8 +217,8 @@ func GetPauseImageNameForHostArch() string { var SubResourcePodProxyVersion = version.MustParse("v1.1.0") var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0") -func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*restclient.Request, error) { - subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) +func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return nil, err } @@ -231,7 +231,7 @@ func GetServicesProxyRequest(c *client.Client, request *restclient.Request) (*re // unique identifier of the e2e run var RunId = uuid.NewUUID() -type CreateTestingNSFn func(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) +type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error) type ContainerFailures struct { status *api.ContainerStateTerminated @@ -320,13 +320,13 @@ func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInter } // Detects whether the federation namespace exists in the underlying cluster -func SkipUnlessFederated(c *client.Client) { +func SkipUnlessFederated(c clientset.Interface) { federationNS := os.Getenv("FEDERATION_NAMESPACE") if federationNS == "" { federationNS = "federation" } - _, err := c.Namespaces().Get(federationNS) + _, err := c.Core().Namespaces().Get(federationNS) if err != nil { if apierrs.IsNotFound(err) { Skipf("Could not find federation namespace %s: skipping federated test", federationNS) @@ -434,12 +434,12 @@ func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api // WaitForPodsSuccess waits till all labels matching the given selector enter // the Success state. The caller is expected to only invoke this method once the // pods have been created. -func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string]string, timeout time.Duration) error { +func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error { successPodSelector := labels.SelectorFromSet(successPodLabels) start, badPods, desiredPods := time.Now(), []api.Pod{}, 0 if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { - podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector}) + podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return false, nil @@ -484,7 +484,7 @@ func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string // even if there are minPods pods, some of which are in Running/Ready // and some in Success. This is to allow the client to decide if "Success" // means "Ready" or not. 
-func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error { +func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error { ignoreSelector := labels.SelectorFromSet(ignoreLabels) start := time.Now() Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", @@ -503,7 +503,7 @@ func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout // We get the new list of pods and replication controllers in every // iteration because more pods come online during startup and we want to // ensure they are also checked. - rcList, err := c.ReplicationControllers(ns).List(api.ListOptions{}) + rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{}) if err != nil { Logf("Error getting replication controllers in namespace '%s': %v", ns, err) return false, nil @@ -513,7 +513,7 @@ func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout replicas += rc.Spec.Replicas } - podList, err := c.Pods(ns).List(api.ListOptions{}) + podList, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { Logf("Error getting pods in namespace '%s': %v", ns, err) return false, nil @@ -578,7 +578,7 @@ func podFromManifest(filename string) (*api.Pod, error) { // Run a test container to try and contact the Kubernetes api-server from a pod, wait for it // to flip to Ready, log its output and delete it. -func RunKubernetesServiceTestContainer(c *client.Client, ns string) { +func RunKubernetesServiceTestContainer(c clientset.Interface, ns string) { path := "test/images/clusterapi-tester/pod.yaml" p, err := podFromManifest(path) if err != nil { @@ -586,12 +586,12 @@ func RunKubernetesServiceTestContainer(c *client.Client, ns string) { return } p.Namespace = ns - if _, err := c.Pods(ns).Create(p); err != nil { + if _, err := c.Core().Pods(ns).Create(p); err != nil { Logf("Failed to create %v: %v", p.Name, err) return } defer func() { - if err := c.Pods(ns).Delete(p.Name, nil); err != nil { + if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil { Logf("Failed to delete pod %v: %v", p.Name, err) } }() @@ -608,7 +608,7 @@ func RunKubernetesServiceTestContainer(c *client.Client, ns string) { } } -func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { +func kubectlLogPod(c clientset.Interface, pod api.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, containerNameSubstr) { // Contains() matches all strings if substr is empty @@ -625,8 +625,8 @@ func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string, lo } } -func LogFailedContainers(c *client.Client, ns string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Pods(ns).List(api.ListOptions{}) +func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return @@ -639,8 +639,8 @@ func LogFailedContainers(c *client.Client, ns string, logFunc func(ftm string, a } } -func LogPodsWithLabels(c *client.Client, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { - podList, err := 
c.Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) +func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) if err != nil { logFunc("Error getting pods in namespace %q: %v", ns, err) return @@ -651,8 +651,8 @@ func LogPodsWithLabels(c *client.Client, ns string, match map[string]string, log } } -func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) +func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { + podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return @@ -665,9 +665,9 @@ func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string // DeleteNamespaces deletes all namespaces that match the given delete and skip filters. // Filter is by simple strings.Contains; first skip filter, then delete filter. // Returns the list of deleted namespaces or an error. -func DeleteNamespaces(c *client.Client, deleteFilter, skipFilter []string) ([]string, error) { +func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") - nsList, err := c.Namespaces().List(api.ListOptions{}) + nsList, err := c.Core().Namespaces().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) var deleted []string var wg sync.WaitGroup @@ -697,7 +697,7 @@ OUTER: go func(nsName string) { defer wg.Done() defer GinkgoRecover() - Expect(c.Namespaces().Delete(nsName)).To(Succeed()) + Expect(c.Core().Namespaces().Delete(nsName, nil)).To(Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } @@ -705,7 +705,7 @@ OUTER: return deleted, nil } -func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout time.Duration) error { +func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error { By("Waiting for namespaces to vanish") nsMap := map[string]bool{} for _, ns := range namespaces { @@ -714,7 +714,7 @@ func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout tim //Now POLL until all namespaces have been eradicated. 
return wait.Poll(2*time.Second, timeout, func() (bool, error) { - nsList, err := c.Namespaces().List(api.ListOptions{}) + nsList, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { return false, err } @@ -727,8 +727,8 @@ func WaitForNamespacesDeleted(c *client.Client, namespaces []string, timeout tim }) } -func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, timeout time.Duration) error { - w, err := c.ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName})) +func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { + w, err := c.Core().ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } @@ -736,10 +736,10 @@ func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName s return err } -func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout time.Duration, condition podCondition) error { +func waitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pod, err := c.Pods(ns).Get(podName) + pod, err := c.Core().Pods(ns).Get(podName) if err != nil { if apierrs.IsNotFound(err) { Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err) @@ -763,10 +763,10 @@ func waitForPodCondition(c *client.Client, ns, podName, desc string, timeout tim // WaitForMatchPodsCondition finds match pods based on the input ListOptions. // waits and checks if all match pods are in the given podCondition -func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error { +func WaitForMatchPodsCondition(c clientset.Interface, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pods, err := c.Pods(api.NamespaceAll).List(opts) + pods, err := c.Core().Pods(api.NamespaceAll).List(opts) if err != nil { return err } @@ -791,7 +791,7 @@ func WaitForMatchPodsCondition(c *client.Client, opts api.ListOptions, desc stri // WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned -func WaitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error { +func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error { return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout) } @@ -808,10 +808,10 @@ func WaitForFederationApiserverReady(c *federation_release_1_5.Clientset) error } // WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. 
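Several of the rewritten helpers open watches via c.Core().<Resource>(ns).Watch(api.SingleObject(...)). A rough sketch of consuming such a watch directly; the function name is hypothetical, and the watch.Interface and watch.Deleted types are assumed to come from this tree's pkg/watch package:

    package example

    import (
        "fmt"
        "time"

        "k8s.io/kubernetes/pkg/api"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/watch"
    )

    // waitForPodDeleted is an illustrative sketch: it watches a single pod by
    // name and returns once a Deleted event arrives or the timeout expires.
    func waitForPodDeleted(c clientset.Interface, ns, name string, timeout time.Duration) error {
        w, err := c.Core().Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
        if err != nil {
            return err
        }
        defer w.Stop()
        deadline := time.After(timeout)
        for {
            select {
            case ev, ok := <-w.ResultChan():
                if !ok {
                    return fmt.Errorf("watch on pod %s/%s closed unexpectedly", ns, name)
                }
                if ev.Type == watch.Deleted {
                    return nil
                }
            case <-deadline:
                return fmt.Errorf("timed out waiting for pod %s/%s to be deleted", ns, name)
            }
        }
    }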
-func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client, pvName string, Poll, timeout time.Duration) error { +func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.PersistentVolumes().Get(pvName) + pv, err := c.Core().PersistentVolumes().Get(pvName) if err != nil { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue @@ -828,10 +828,10 @@ func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Cli } // WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. -func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeout time.Duration) error { +func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.PersistentVolumes().Get(pvName) + pv, err := c.Core().PersistentVolumes().Get(pvName) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue @@ -848,10 +848,10 @@ func WaitForPersistentVolumeDeleted(c *client.Client, pvName string, Poll, timeo } // WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. -func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, Poll, timeout time.Duration) error { +func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName) + pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName) if err != nil { Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err) continue @@ -869,7 +869,7 @@ func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c * // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. 
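The three persistent-volume helpers above share a poll, Get, compare loop. A compressed sketch of the same loop using wait.Poll; the name is hypothetical and the api.VolumeBound phase constant is assumed from the internal API:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/util/wait"
    )

    // waitForPVBound is an illustrative sketch: poll the PersistentVolume through
    // Core() until it reports the Bound phase, ignoring transient Get errors.
    func waitForPVBound(c clientset.Interface, pvName string, interval, timeout time.Duration) error {
        return wait.Poll(interval, timeout, func() (bool, error) {
            pv, err := c.Core().PersistentVolumes().Get(pvName)
            if err != nil {
                return false, nil // retry, as the helpers above do
            }
            return pv.Status.Phase == api.VolumeBound, nil
        })
    }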
-func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) { +func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error) { if labels == nil { labels = map[string]string{} } @@ -887,7 +887,7 @@ func CreateTestingNS(baseName string, c *client.Client, labels map[string]string var got *api.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error - got, err = c.Namespaces().Create(namespaceObj) + got, err = c.Core().Namespaces().Create(namespaceObj) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil @@ -907,7 +907,7 @@ func CreateTestingNS(baseName string, c *client.Client, labels map[string]string // CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state // and waits until they are finally deleted. It ignores namespace skip. -func CheckTestingNSDeletedExcept(c *client.Client, skip string) error { +func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { // TODO: Since we don't have support for bulk resource deletion in the API, // while deleting a namespace we are deleting all objects from that namespace // one by one (one deletion == one API call). This basically exposes us to @@ -923,7 +923,7 @@ func CheckTestingNSDeletedExcept(c *client.Client, skip string) error { Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { - namespaces, err := c.Namespaces().List(api.ListOptions{}) + namespaces, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue @@ -946,14 +946,14 @@ func CheckTestingNSDeletedExcept(c *client.Client, skip string) error { // deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks // whether there are any pods remaining in a non-terminating state. -func deleteNS(c *client.Client, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error { - if err := c.Namespaces().Delete(namespace); err != nil { +func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error { + if err := c.Core().Namespaces().Delete(namespace, nil); err != nil { return err } // wait for namespace to delete or timeout. 
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) { - if _, err := c.Namespaces().Get(namespace); err != nil { + if _, err := c.Core().Namespaces().Get(namespace); err != nil { if apierrs.IsNotFound(err) { return true, nil } @@ -1005,8 +1005,8 @@ func deleteNS(c *client.Client, clientPool dynamic.ClientPool, namespace string, // logNamespaces logs the number of namespaces by phase // namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs -func logNamespaces(c *client.Client, namespace string) { - namespaceList, err := c.Namespaces().List(api.ListOptions{}) +func logNamespaces(c clientset.Interface, namespace string) { + namespaceList, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { Logf("namespace: %v, unable to list namespaces: %v", namespace, err) return @@ -1025,8 +1025,8 @@ func logNamespaces(c *client.Client, namespace string) { } // logNamespace logs detail about a namespace -func logNamespace(c *client.Client, namespace string) { - ns, err := c.Namespaces().Get(namespace) +func logNamespace(c clientset.Interface, namespace string) { + ns, err := c.Core().Namespaces().Get(namespace) if err != nil { if apierrs.IsNotFound(err) { Logf("namespace: %v no longer exists", namespace) @@ -1039,9 +1039,9 @@ func logNamespace(c *client.Client, namespace string) { } // countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. -func countRemainingPods(c *client.Client, namespace string) (int, int, error) { +func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { // check for remaining pods - pods, err := c.Pods(namespace).List(api.ListOptions{}) + pods, err := c.Core().Pods(namespace).List(api.ListOptions{}) if err != nil { return 0, 0, err } @@ -1066,7 +1066,7 @@ func countRemainingPods(c *client.Client, namespace string) (int, int, error) { } // hasRemainingContent checks if there is remaining content in the namespace via API discovery -func hasRemainingContent(c *client.Client, clientPool dynamic.ClientPool, namespace string) (bool, error) { +func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) { // some tests generate their own framework.Client rather than the default // TODO: ensure every test call has a configured clientPool if clientPool == nil { @@ -1225,7 +1225,7 @@ func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodRunningInNamespace(c *client.Client, pod *api.Pod) error { +func WaitForPodRunningInNamespace(c clientset.Interface, pod *api.Pod) error { // this short-cicuit is needed for cases when we pass a list of pods instead // of newly created pod (e.g. VerifyPods) which means we are getting already // running pod for which waiting does not make sense and will always fail @@ -1237,19 +1237,19 @@ func WaitForPodRunningInNamespace(c *client.Client, pod *api.Pod) error { // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. 
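deleteNS above captures another recurring change: Namespaces().Delete now takes a second options argument (nil here), and disappearance is detected by polling Get for NotFound. A self-contained sketch of that sequence, with a hypothetical name:

    package example

    import (
        "time"

        apierrs "k8s.io/kubernetes/pkg/api/errors"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/util/wait"
    )

    // deleteNamespaceAndWait is an illustrative sketch: delete a namespace via
    // Core() and poll until the apiserver reports it NotFound.
    func deleteNamespaceAndWait(c clientset.Interface, ns string, timeout time.Duration) error {
        if err := c.Core().Namespaces().Delete(ns, nil); err != nil && !apierrs.IsNotFound(err) {
            return err
        }
        return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
            _, err := c.Core().Namespaces().Get(ns)
            if apierrs.IsNotFound(err) {
                return true, nil
            }
            return false, nil // still present or transient error; keep polling
        })
    }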
-func WaitForPodNameRunningInNamespace(c *client.Client, podName, namespace string) error { +func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout) } // Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state. -func waitForPodRunningInNamespaceSlow(c *client.Client, podName, namespace, resourceVersion string) error { +func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace, resourceVersion string) error { return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout) } -func waitTimeoutForPodRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) +func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { + w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1259,12 +1259,12 @@ func waitTimeoutForPodRunningInNamespace(c *client.Client, podName, namespace, r // Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. -func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string) error { +func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string) error { return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout) } -func WaitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) +func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { + w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1272,8 +1272,8 @@ func WaitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName, name return err } -func waitTimeoutForPodReadyInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error { - w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) +func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { + w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1284,8 +1284,8 @@ func waitTimeoutForPodReadyInNamespace(c *client.Client, podName, namespace, res // WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. 
// The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. -func WaitForPodNotPending(c *client.Client, ns, podName, resourceVersion string) error { - w, err := c.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) +func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error { + w, err := c.Core().Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } @@ -1295,7 +1295,7 @@ func WaitForPodNotPending(c *client.Client, ns, podName, resourceVersion string) // waitForPodTerminatedInNamespace returns an error if it took too long for the pod // to terminate or if the pod terminated with an unexpected reason. -func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace string) error { +func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) { if pod.Status.Phase == api.PodFailed { if pod.Status.Reason == reason { @@ -1310,7 +1310,7 @@ func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespac } // waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. -func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, namespace string, timeout time.Duration) error { +func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error { return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) { if pod.Spec.RestartPolicy == api.RestartPolicyAlways { return false, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) @@ -1328,24 +1328,24 @@ func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, names } // WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. -func WaitForPodSuccessInNamespace(c *client.Client, podName string, namespace string) error { +func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout) } // WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. -func WaitForPodSuccessInNamespaceSlow(c *client.Client, podName string, namespace string) error { +func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) } // waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node. // In case of failure or too long waiting time, an error is returned. 
-func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) { +func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*api.Pod, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) var p *api.Pod = nil err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { Logf("Waiting for pod %s to appear on node %s", rcName, node) options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } @@ -1362,12 +1362,12 @@ func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, er } // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. -func WaitForRCToStabilize(c *client.Client, ns, name string, timeout time.Duration) error { +func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { options := api.ListOptions{FieldSelector: fields.Set{ "metadata.name": name, "metadata.namespace": ns, }.AsSelector()} - w, err := c.ReplicationControllers(ns).Watch(options) + w, err := c.Core().ReplicationControllers(ns).Watch(options) if err != nil { return err } @@ -1391,11 +1391,11 @@ func WaitForRCToStabilize(c *client.Client, ns, name string, timeout time.Durati return err } -func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { +func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } @@ -1417,7 +1417,7 @@ func WaitForPodToDisappear(c *client.Client, ns, podName string, label labels.Se // WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists. // In case of failure or too long waiting time, an error is returned. -func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error { +func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) // NodeController evicts pod after 5 minutes, so we need timeout greater than that. // Additionally, there can be non-zero grace period, so we are setting 10 minutes @@ -1426,9 +1426,9 @@ func WaitForRCPodToDisappear(c *client.Client, ns, rcName, podName string) error } // WaitForService waits until the service appears (exist == true), or disappears (exist == false) -func WaitForService(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { +func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.Services(namespace).Get(name) + _, err := c.Core().Services(namespace).Get(name) switch { case err == nil: if !exist { @@ -1455,10 +1455,10 @@ func WaitForService(c *client.Client, namespace, name string, exist bool, interv } //WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. 
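The selector plumbing is untouched by this patch; only the call sites move under Core(). Two small sketches with hypothetical names, reusing the selectors built above, for listing an RC's pods by label and watching a single RC by field selector (watch.Interface is assumed to come from this tree's pkg/watch):

    package example

    import (
        "k8s.io/kubernetes/pkg/api"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/fields"
        "k8s.io/kubernetes/pkg/labels"
        "k8s.io/kubernetes/pkg/watch"
    )

    // podsOfRC lists pods carrying the conventional "name" label used by the
    // e2e replication controllers.
    func podsOfRC(c clientset.Interface, ns, rcName string) (*api.PodList, error) {
        label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
        return c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label})
    }

    // watchRC opens a watch scoped to one replication controller by field
    // selector, as WaitForRCToStabilize does above.
    func watchRC(c clientset.Interface, ns, name string) (watch.Interface, error) {
        options := api.ListOptions{FieldSelector: fields.Set{
            "metadata.name":      name,
            "metadata.namespace": ns,
        }.AsSelector()}
        return c.Core().ReplicationControllers(ns).Watch(options)
    }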
-func WaitForServiceEndpointsNum(c *client.Client, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { +func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) - list, err := c.Endpoints(namespace).List(api.ListOptions{}) + list, err := c.Core().Endpoints(namespace).List(api.ListOptions{}) if err != nil { return false, err } @@ -1481,9 +1481,9 @@ func countEndpointsNum(e *api.Endpoints) int { } // WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) -func WaitForReplicationController(c *client.Client, namespace, name string, exist bool, interval, timeout time.Duration) error { +func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.ReplicationControllers(namespace).Get(name) + _, err := c.Core().ReplicationControllers(namespace).Get(name) if err != nil { Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) return !exist, nil @@ -1499,9 +1499,9 @@ func WaitForReplicationController(c *client.Client, namespace, name string, exis return nil } -func WaitForEndpoint(c *client.Client, ns, name string) error { +func WaitForEndpoint(c clientset.Interface, ns, name string) error { for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) { - endpoint, err := c.Endpoints(ns).Get(name) + endpoint, err := c.Core().Endpoints(ns).Get(name) Expect(err).NotTo(HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { Logf("Endpoint %s/%s is not ready yet", ns, name) @@ -1516,7 +1516,7 @@ func WaitForEndpoint(c *client.Client, ns, name string) error { // Context for checking pods responses by issuing GETs to them (via the API // proxy) and verifying that they answer with ther own pod name. type podProxyResponseChecker struct { - c *client.Client + c clientset.Interface ns string label labels.Selector controllerName string @@ -1524,7 +1524,7 @@ type podProxyResponseChecker struct { pods *api.PodList } -func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker { +func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker { return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods} } @@ -1533,20 +1533,20 @@ func PodProxyResponseChecker(c *client.Client, ns string, label labels.Selector, func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := api.ListOptions{LabelSelector: r.label} - currentPods, err := r.c.Pods(r.ns).List(options) + currentPods, err := r.c.Core().Pods(r.ns).List(options) Expect(err).NotTo(HaveOccurred()) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. if !isElementOf(pod.UID, currentPods) { return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. 
Current replica set: %v", pod.UID, currentPods) } - subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c) + subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery()) if err != nil { return false, err } var body []byte if subResourceProxyAvailable { - body, err = r.c.Get(). + body, err = r.c.Core().RESTClient().Get(). Namespace(r.ns). Resource("pods"). SubResource("proxy"). @@ -1554,7 +1554,7 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { Do(). Raw() } else { - body, err = r.c.Get(). + body, err = r.c.Core().RESTClient().Get(). Prefix("proxy"). Namespace(r.ns). Resource("pods"). @@ -1645,19 +1645,19 @@ func KubectlVersion() (semver.Version, error) { return version.Parse(matches[1]) } -func PodsResponding(c *client.Client, ns, name string, wantName bool, pods *api.PodList) error { +func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *api.PodList) error { By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } -func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodList, error) { +func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*api.PodList, error) { timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) if err != nil { return nil, err } @@ -1679,7 +1679,7 @@ func PodsCreated(c *client.Client, ns, name string, replicas int32) (*api.PodLis return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas) } -func podsRunning(c *client.Client, pods *api.PodList) []error { +func podsRunning(c clientset.Interface, pods *api.PodList) []error { // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. 
By("ensuring each pod is running") @@ -1702,7 +1702,7 @@ func podsRunning(c *client.Client, pods *api.PodList) []error { return e } -func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32) error { +func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { pods, err := PodsCreated(c, ns, name, replicas) if err != nil { return err @@ -1718,11 +1718,11 @@ func VerifyPods(c *client.Client, ns, name string, wantName bool, replicas int32 return nil } -func ServiceResponding(c *client.Client, ns, name string) error { +func ServiceResponding(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { - proxyRequest, errProxy := GetServicesProxyRequest(c, c.Get()) + proxyRequest, errProxy := GetServicesProxyRequest(c, c.Core().RESTClient().Get()) if errProxy != nil { Logf("Failed to get services proxy request: %v:", errProxy) return false, nil @@ -1821,6 +1821,14 @@ func LoadClient() (*client.Client, error) { return loadClientFromConfig(config) } +func LoadInternalClientset() (*clientset.Clientset, error) { + config, err := LoadConfig() + if err != nil { + return nil, fmt.Errorf("error creating client: %v", err.Error()) + } + return clientset.NewForConfig(config) +} + func LoadClientset() (*release_1_5.Clientset, error) { config, err := LoadConfig() if err != nil { @@ -1889,7 +1897,7 @@ func AssertCleanup(ns string, selectors ...string) { // validatorFn is the function which is individual tests will implement. // we may want it to return more than just an error, at some point. -type validatorFn func(c *client.Client, podID string) error +type validatorFn func(c clientset.Interface, podID string) error // ValidateController is a generic mechanism for testing RC's that are running. // It takes a container name, a test name, and a validator function which is plugged in by a specific test. @@ -1897,7 +1905,7 @@ type validatorFn func(c *client.Client, podID string) error // "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated. // "testname": which gets bubbled up to the logging/failure messages if errors happen. // "validator" function: This function is given a podID and a client, and it can do some specific validations that way. -func ValidateController(c *client.Client, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) { +func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) { getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}" // NB: kubectl adds the "exists" function to the standard template functions. // This lets us check to see if the "running" entry exists for each of the containers @@ -2148,7 +2156,7 @@ func (f *Framework) MatchContainerOutput( createdPod := podClient.Create(pod) // Wait for client pod to complete. 
- if err := WaitForPodSuccessInNamespace(f.Client, createdPod.Name, ns); err != nil { + if err := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns); err != nil { return fmt.Errorf("expected pod %q success: %v", pod.Name, err) } @@ -2162,7 +2170,7 @@ func (f *Framework) MatchContainerOutput( podStatus.Spec.NodeName, podStatus.Name, containerName, err) // Sometimes the actual containers take a second to get started, try to get logs for 60s - logs, err := GetPodLogs(f.Client, ns, podStatus.Name, containerName) + logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) if err != nil { Logf("Failed to get logs from node %q pod %q container %q. %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) @@ -2223,7 +2231,7 @@ func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { // you may or may not see the killing/deletion/Cleanup events. } -func DumpAllNamespaceInfo(c *client.Client, cs *release_1_5.Clientset, namespace string) { +func DumpAllNamespaceInfo(c clientset.Interface, cs *release_1_5.Clientset, namespace string) { DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { return cs.Core().Events(ns).List(opts) }, namespace) @@ -2233,7 +2241,7 @@ func DumpAllNamespaceInfo(c *client.Client, cs *release_1_5.Clientset, namespace // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := 20 - if nodes, err := c.Nodes().List(api.ListOptions{}); err == nil { + if nodes, err := c.Core().Nodes().List(api.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) dumpAllNodeInfo(c) @@ -2258,17 +2266,17 @@ func (o byFirstTimestamp) Less(i, j int) bool { return o[i].FirstTimestamp.Before(o[j].FirstTimestamp) } -func dumpAllPodInfo(c *client.Client) { - pods, err := c.Pods("").List(api.ListOptions{}) +func dumpAllPodInfo(c clientset.Interface) { + pods, err := c.Core().Pods("").List(api.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } logPodStates(pods.Items) } -func dumpAllNodeInfo(c *client.Client) { +func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return @@ -2280,10 +2288,10 @@ func dumpAllNodeInfo(c *client.Client) { DumpNodeDebugInfo(c, names, Logf) } -func DumpNodeDebugInfo(c *client.Client, nodeNames []string, logFunc func(fmt string, args ...interface{})) { +func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) - node, err := c.Nodes().Get(n) + node, err := c.Core().Nodes().Get(n) if err != nil { logFunc("Error getting node info %v", err) } @@ -2319,7 +2327,7 @@ func DumpNodeDebugInfo(c *client.Client, nodeNames []string, logFunc func(fmt st // logNodeEvents logs kubelet events from the given node. This includes kubelet // restart and node unhealthy events. Note that listing events like this will mess // with latency metrics, beware of calling it during a test. 
-func getNodeEvents(c *client.Client, nodeName string) []api.Event { +func getNodeEvents(c clientset.Interface, nodeName string) []api.Event { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": nodeName, @@ -2327,7 +2335,7 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event { "source": "kubelet", }.AsSelector() options := api.ListOptions{FieldSelector: selector} - events, err := c.Events(api.NamespaceSystem).List(options) + events, err := c.Core().Events(api.NamespaceSystem).List(options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []api.Event{} @@ -2375,7 +2383,7 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) return nodes } -func WaitForAllNodesSchedulable(c *client.Client) error { +func WaitForAllNodesSchedulable(c clientset.Interface) error { Logf("Waiting up to %v for all (but %d) nodes to be schedulable", 4*time.Hour, TestContext.AllowedNotReadyNodes) var notSchedulable []*api.Node @@ -2385,7 +2393,7 @@ func WaitForAllNodesSchedulable(c *client.Client) error { ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector(), } - nodes, err := c.Nodes().List(opts) + nodes, err := c.Core().Nodes().List(opts) if err != nil { Logf("Unexpected error listing nodes: %v", err) // Ignore the error here - it will be retried. @@ -2432,9 +2440,9 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey})) } -func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) { +func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint api.Taint) { for attempt := 0; attempt < UpdateRetries; attempt++ { - node, err := c.Nodes().Get(nodeName) + node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) @@ -2463,7 +2471,7 @@ func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) node.Annotations = make(map[string]string) } node.Annotations[api.TaintsAnnotationKey] = string(taintsData) - _, err = c.Nodes().Update(node) + _, err = c.Core().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) @@ -2486,9 +2494,9 @@ func taintExists(taints []api.Taint, taintToFind api.Taint) bool { return false } -func ExpectNodeHasTaint(c *client.Client, nodeName string, taint api.Taint) { +func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint api.Taint) { By("verifying the node has the taint " + taint.ToString()) - node, err := c.Nodes().Get(nodeName) + node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) @@ -2518,10 +2526,10 @@ func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, e // RemoveTaintOffNode is for cleaning up taints temporarily added to node, // won't fail if target taint doesn't exist or has been removed. 
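AddOrUpdateTaintOnNode above and RemoveTaintOffNode below both follow a Get, modify, Update loop on Nodes() that retries on write conflicts. A stripped-down sketch of that loop for a generic annotation, with a hypothetical name, using only calls visible in this diff:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        apierrs "k8s.io/kubernetes/pkg/api/errors"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    )

    // setNodeAnnotation is an illustrative sketch of the read-modify-write cycle
    // used by the taint helpers, retrying when the apiserver reports a conflict.
    func setNodeAnnotation(c clientset.Interface, nodeName, key, value string, retries int) error {
        var err error
        for attempt := 0; attempt < retries; attempt++ {
            var node *api.Node
            node, err = c.Core().Nodes().Get(nodeName)
            if err != nil {
                return err
            }
            if node.Annotations == nil {
                node.Annotations = make(map[string]string)
            }
            node.Annotations[key] = value
            if _, err = c.Core().Nodes().Update(node); err == nil {
                return nil
            }
            if !apierrs.IsConflict(err) {
                return err
            }
            time.Sleep(100 * time.Millisecond)
        }
        return err
    }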
-func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) { +func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint api.Taint) { By("removing the taint " + taint.ToString() + " off the node " + nodeName) for attempt := 0; attempt < UpdateRetries; attempt++ { - node, err := c.Nodes().Get(nodeName) + node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) @@ -2540,7 +2548,7 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) { taintsData, err := json.Marshal(newTaints) ExpectNoError(err) node.Annotations[api.TaintsAnnotationKey] = string(taintsData) - _, err = c.Nodes().Update(node) + _, err = c.Core().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) @@ -2553,7 +2561,7 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) { time.Sleep(100 * time.Millisecond) } - nodeUpdated, err := c.Nodes().Get(nodeName) + nodeUpdated, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) By("verifying the node doesn't have the taint " + taint.ToString()) taintsGot, err := api.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations) @@ -2563,7 +2571,7 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) { } } -func ScaleRC(c *client.Client, clientset clientset.Interface, ns, name string, size uint, wait bool) error { +func ScaleRC(clientset clientset.Interface, ns, name string, size uint, wait bool) error { By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size)) scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset) if err != nil { @@ -2577,12 +2585,12 @@ func ScaleRC(c *client.Client, clientset clientset.Interface, ns, name string, s if !wait { return nil } - return WaitForRCPodsRunning(c, ns, name) + return WaitForRCPodsRunning(clientset, ns, name) } // Wait up to 10 minutes for pods to become Running. -func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error { - rc, err := c.ReplicationControllers(ns).Get(rcName) +func WaitForRCPodsRunning(c clientset.Interface, ns, rcName string) error { + rc, err := c.Core().ReplicationControllers(ns).Get(rcName) if err != nil { return err } @@ -2595,7 +2603,7 @@ func WaitForRCPodsRunning(c *client.Client, ns, rcName string) error { } // Returns true if all the specified pods are scheduled, else returns false. -func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (bool, error) { +func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) { PodStore := testutils.NewPodStore(c, ns, label, fields.Everything()) defer PodStore.Stop() pods := PodStore.List() @@ -2612,7 +2620,7 @@ func podsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) // Wait for all matching pods to become scheduled and at least one // matching pod exists. Return the list of matching pods. 
-func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { +func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) { err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, func() (bool, error) { pods, err = WaitForPodsWithLabel(c, ns, label) @@ -2630,10 +2638,10 @@ func WaitForPodsWithLabelScheduled(c *client.Client, ns string, label labels.Sel } // Wait up to PodListTimeout for getting pods with certain label -func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (pods *api.PodList, err error) { +func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) { for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { options := api.ListOptions{LabelSelector: label} - pods, err = c.Pods(ns).List(options) + pods, err = c.Core().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(pods.Items) > 0 { break @@ -2646,9 +2654,9 @@ func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (p } // DeleteRCAndPods a Replication Controller and all pods it spawned -func DeleteRCAndPods(c *client.Client, clientset clientset.Interface, ns, name string) error { +func DeleteRCAndPods(clientset clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns)) - rc, err := c.ReplicationControllers(ns).Get(name) + rc, err := clientset.Core().ReplicationControllers(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) @@ -2664,7 +2672,7 @@ func DeleteRCAndPods(c *client.Client, clientset clientset.Interface, ns, name s } return err } - ps, err := podStoreForRC(c, rc) + ps, err := podStoreForRC(clientset, rc) if err != nil { return err } @@ -2696,9 +2704,9 @@ func DeleteRCAndPods(c *client.Client, clientset clientset.Interface, ns, name s } // DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods. -func DeleteRCAndWaitForGC(c *client.Client, ns, name string) error { +func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting replication controller %s in namespace %s, will wait for the garbage collector to delete the pods", name, ns)) - rc, err := c.ReplicationControllers(ns).Get(name) + rc, err := c.Core().ReplicationControllers(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) @@ -2714,7 +2722,7 @@ func DeleteRCAndWaitForGC(c *client.Client, ns, name string) error { startTime := time.Now() falseVar := false deleteOption := &api.DeleteOptions{OrphanDependents: &falseVar} - err = c.ReplicationControllers(ns).Delete(name, deleteOption) + err = c.Core().ReplicationControllers(ns).Delete(name, deleteOption) if err != nil && apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil @@ -2755,7 +2763,7 @@ func DeleteRCAndWaitForGC(c *client.Client, ns, name string) error { // podStoreForRC creates a PodStore that monitors pods belong to the rc. It // waits until the reflector does a List() before returning. 
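DeleteRCAndWaitForGC above leans on the server-side garbage collector by deleting the RC with OrphanDependents set to false. The delete call alone, as a sketch with a hypothetical name:

    package example

    import (
        "k8s.io/kubernetes/pkg/api"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    )

    // deleteRCAndDependents is an illustrative sketch: delete a replication
    // controller and ask the server not to orphan its pods, so the garbage
    // collector removes them.
    func deleteRCAndDependents(c clientset.Interface, ns, name string) error {
        falseVar := false
        return c.Core().ReplicationControllers(ns).Delete(name, &api.DeleteOptions{OrphanDependents: &falseVar})
    }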
-func podStoreForRC(c *client.Client, rc *api.ReplicationController) (*testutils.PodStore, error) { +func podStoreForRC(c clientset.Interface, rc *api.ReplicationController) (*testutils.PodStore, error) { labels := labels.SelectorFromSet(rc.Spec.Selector) ps := testutils.NewPodStore(c, rc.Namespace, labels, fields.Everything()) err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { @@ -2794,9 +2802,9 @@ func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) er } // Delete a ReplicaSet and all pods it spawned -func DeleteReplicaSet(c *client.Client, clientset clientset.Interface, ns, name string) error { +func DeleteReplicaSet(clientset clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns)) - rc, err := c.Extensions().ReplicaSets(ns).Get(name) + rc, err := clientset.Extensions().ReplicaSets(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("ReplicaSet %s was already deleted: %v", name, err) @@ -2821,7 +2829,7 @@ func DeleteReplicaSet(c *client.Client, clientset clientset.Interface, ns, name deleteRSTime := time.Now().Sub(startTime) Logf("Deleting RS %s took: %v", name, deleteRSTime) if err == nil { - err = waitForReplicaSetPodsGone(c, rc) + err = waitForReplicaSetPodsGone(clientset, rc) } terminatePodTime := time.Now().Sub(startTime) - deleteRSTime Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime) @@ -2830,12 +2838,12 @@ func DeleteReplicaSet(c *client.Client, clientset clientset.Interface, ns, name // waitForReplicaSetPodsGone waits until there are no pods reported under a // ReplicaSet selector (because the pods have completed termination). -func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error { +func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error { return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) ExpectNoError(err) options := api.ListOptions{LabelSelector: selector} - if pods, err := c.Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 { + if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 { return true, nil } return false, nil @@ -3154,9 +3162,9 @@ func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen } // Waits for the number of events on the given object to reach a desired count. -func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desiredEventsCount int) error { +func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - events, err := c.Events(ns).Search(objOrRef) + events, err := c.Core().Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } @@ -3173,9 +3181,9 @@ func WaitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired } // Waits for the number of events on the given object to be at least a desired count. 
-func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { +func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - events, err := c.Events(ns).Search(objOrRef) + events, err := c.Core().Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } @@ -3378,9 +3386,9 @@ func RunHostCmdOrDie(ns, name, cmd string) string { // LaunchHostExecPod launches a hostexec pod in the given namespace and waits // until it's Running -func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod { +func LaunchHostExecPod(client clientset.Interface, ns, name string) *api.Pod { hostExecPod := NewHostExecPodSpec(ns, name) - pod, err := client.Pods(ns).Create(hostExecPod) + pod, err := client.Core().Pods(ns).Create(hostExecPod) ExpectNoError(err) err = WaitForPodRunningInNamespace(client, pod) ExpectNoError(err) @@ -3425,20 +3433,20 @@ func GetSigner(provider string) (ssh.Signer, error) { // CheckPodsRunningReady returns whether all pods whose names are listed in // podNames in namespace ns are running and ready, using c and waiting at most // timeout. -func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool { +func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") } // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are // listed in podNames in namespace ns are running and ready, or succeeded; use // c and waiting at most timeout. -func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool { +func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") } // CheckPodsCondition returns whether all pods whose names are listed in podNames // in namespace ns are in the condition, using c and waiting at most timeout. -func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { +func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { np := len(podNames) Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) result := make(chan bool, len(podNames)) @@ -3464,14 +3472,14 @@ func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout } // WaitForNodeToBeReady returns whether node name is ready within timeout. -func WaitForNodeToBeReady(c *client.Client, name string, timeout time.Duration) bool { +func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, api.NodeReady, true, timeout) } // WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the // readiness condition is anything but ready, e.g false or unknown) within // timeout. 
-func WaitForNodeToBeNotReady(c *client.Client, name string, timeout time.Duration) bool { +func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, api.NodeReady, false, timeout) } @@ -3518,10 +3526,10 @@ func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) b // within timeout. If wantTrue is true, it will ensure the node condition status // is ConditionTrue; if it's false, it ensures the node condition is in any state // other than ConditionTrue (e.g. not true or unknown). -func WaitForNodeToBe(c *client.Client, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool { +func WaitForNodeToBe(c clientset.Interface, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool { Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - node, err := c.Nodes().Get(name) + node, err := c.Core().Nodes().Get(name) if err != nil { Logf("Couldn't get node %s", name) continue @@ -3556,14 +3564,14 @@ func allowedNotReadyReasons(nodes []*api.Node) bool { // TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy, // and figure out how to do it in a configurable way, as we can't expect all setups to run // default test add-ons. -func AllNodesReady(c *client.Client, timeout time.Duration) error { +func AllNodesReady(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes) var notReady []*api.Node err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { return false, err } @@ -3597,7 +3605,7 @@ func AllNodesReady(c *client.Client, timeout time.Duration) error { } // checks whether all registered nodes are ready and all required Pods are running on them. -func WaitForAllNodesHealthy(c *client.Client, timeout time.Duration) error { +func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all nodes to be ready", timeout) var notReady []api.Node @@ -3605,7 +3613,7 @@ func WaitForAllNodesHealthy(c *client.Client, timeout time.Duration) error { err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.Nodes().List(api.ListOptions{ResourceVersion: "0"}) + nodes, err := c.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -3614,7 +3622,7 @@ func WaitForAllNodesHealthy(c *client.Client, timeout time.Duration) error { notReady = append(notReady, node) } } - pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"}) + pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -3729,7 +3737,7 @@ func RestartKubeProxy(host string) error { return nil } -func RestartApiserver(c *client.Client) error { +func RestartApiserver(c discovery.ServerVersionInterface) error { // TODO: Make it work for all providers. 
if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) @@ -3765,9 +3773,9 @@ func sshRestartMaster() error { return nil } -func WaitForApiserverUp(c *client.Client) error { +func WaitForApiserverUp(c clientset.Interface) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - body, err := c.Get().AbsPath("/healthz").Do().Raw() + body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw() if err == nil && string(body) == "ok" { return nil } @@ -3777,9 +3785,9 @@ func WaitForApiserverUp(c *client.Client) error { // WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it. // By cluster size we mean number of Nodes excluding Master Node. -func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error { +func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) if err != nil { @@ -3806,8 +3814,8 @@ func WaitForClusterSize(c *client.Client, size int, timeout time.Duration) error // GetHostExternalAddress gets the node for a pod and returns the first External // address. Returns an error if the node the pod is on doesn't have an External // address. -func GetHostExternalAddress(client *client.Client, p *api.Pod) (externalAddress string, err error) { - node, err := client.Nodes().Get(p.Spec.NodeName) +func GetHostExternalAddress(client clientset.Interface, p *api.Pod) (externalAddress string, err error) { + node, err := client.Core().Nodes().Get(p.Spec.NodeName) if err != nil { return "", err } @@ -3882,8 +3890,8 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st } // getIngressAddress returns the ips/hostnames associated with the Ingress. -func getIngressAddress(client *client.Client, ns, name string) ([]string, error) { - ing, err := client.Extensions().Ingress(ns).Get(name) +func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) { + ing, err := client.Extensions().Ingresses(ns).Get(name) if err != nil { return nil, err } @@ -3900,7 +3908,7 @@ func getIngressAddress(client *client.Client, ns, name string) ([]string, error) } // WaitForIngressAddress waits for the Ingress to acquire an address. -func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Duration) (string, error) { +func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { var address string err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { ipOrNameList, err := getIngressAddress(c, ns, ingName) @@ -3953,8 +3961,8 @@ func LookForString(expectedString string, timeout time.Duration, fn func() strin } // getSvcNodePort returns the node port for the given service:port. 
-func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, error) { - svc, err := client.Services(ns).Get(name) +func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { + svc, err := client.Core().Services(ns).Get(name) if err != nil { return 0, err } @@ -3970,7 +3978,7 @@ func getSvcNodePort(client *client.Client, ns, name string, svcPort int) (int, e } // GetNodePortURL returns the url to a nodeport Service. -func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string, error) { +func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) { nodePort, err := getSvcNodePort(client, ns, name, svcPort) if err != nil { return "", err @@ -3980,7 +3988,7 @@ func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string // kube-proxy NodePorts won't work. var nodes *api.NodeList if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { - nodes, err = client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{ + nodes, err = client.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) return err == nil, nil @@ -4004,9 +4012,9 @@ func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. -func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns string, l map[string]string, replicas uint) error { +func ScaleRCByLabels(clientset clientset.Interface, ns string, l map[string]string, replicas uint) error { listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))} - rcs, err := client.ReplicationControllers(ns).List(listOpts) + rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts) if err != nil { return err } @@ -4016,15 +4024,15 @@ func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns st Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas) for _, labelRC := range rcs.Items { name := labelRC.Name - if err := ScaleRC(client, clientset, ns, name, replicas, false); err != nil { + if err := ScaleRC(clientset, ns, name, replicas, false); err != nil { return err } - rc, err := client.ReplicationControllers(ns).Get(name) + rc, err := clientset.Core().ReplicationControllers(ns).Get(name) if err != nil { return err } if replicas == 0 { - ps, err := podStoreForRC(client, rc) + ps, err := podStoreForRC(clientset, rc) if err != nil { return err } @@ -4034,7 +4042,7 @@ func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns st } } else { if err := testutils.WaitForPodsWithLabelRunning( - client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil { + clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil { return err } } @@ -4043,17 +4051,17 @@ func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns st } // TODO(random-liu): Change this to be a member function of the framework. 
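Illustrative aside (not part of the patch): ScaleRCByLabels, like DeleteReplicaSet earlier in this file, used to take both the unversioned *client.Client and a clientset.Interface; the redundant first parameter is gone, so call sites shrink by one argument. A hypothetical call site, with an invented label set, assuming the e2e framework package used throughout this diff.
// assumes: "k8s.io/kubernetes/test/e2e/framework"
func scaleDownByLabelSketch(f *framework.Framework, ns string) {
	// Before: framework.ScaleRCByLabels(f.Client, f.ClientSet, ns, labelSet, 0)
	// After:  only the generated clientset is threaded through.
	err := framework.ScaleRCByLabels(f.ClientSet, ns, map[string]string{"app": "guestbook"}, 0)
	framework.ExpectNoError(err)
}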
-func GetPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) { +func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, false) } -func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) { +func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, true) } // utility function for gomega Eventually -func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) { - logs, err := c.Get(). +func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { + logs, err := c.Core().RESTClient().Get(). Resource("pods"). Namespace(namespace). Name(podName).SubResource("log"). @@ -4199,10 +4207,10 @@ func CheckPodHashLabel(pods *api.PodList) error { const proxyTimeout = 2 * time.Minute // NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client. -func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Result, error) { +func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. // This will leak a goroutine if proxy hangs. #22165 - subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c) + subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return restclient.Result{}, err } @@ -4210,7 +4218,7 @@ func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Resul finished := make(chan struct{}) go func() { if subResourceProxyAvailable { - result = c.Get(). + result = c.Core().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). @@ -4218,7 +4226,7 @@ func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Resul Do() } else { - result = c.Get(). + result = c.Core().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). @@ -4236,18 +4244,18 @@ func NodeProxyRequest(c *client.Client, node, endpoint string) (restclient.Resul } // GetKubeletPods retrieves the list of pods on the kubelet -func GetKubeletPods(c *client.Client, node string) (*api.PodList, error) { +func GetKubeletPods(c clientset.Interface, node string) (*api.PodList, error) { return getKubeletPods(c, node, "pods") } // GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods // includes necessary information (e.g., UID, name, namespace for // pods/containers), but do not contain the full spec. 
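Illustrative aside (not part of the patch): the generated clientset has no Get() of its own, so raw REST calls go through the group client's RESTClient(), exactly as getPodLogsInternal above now does. A trimmed sketch of that request, assuming the same clientset import; the real helper also sets container and previous-logs query parameters, elided here.
// assumes: clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
func podLogsSketch(c clientset.Interface, ns, podName string) (string, error) {
	body, err := c.Core().RESTClient().Get().
		Resource("pods").
		Namespace(ns).
		Name(podName).
		SubResource("log").
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}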
-func GetKubeletRunningPods(c *client.Client, node string) (*api.PodList, error) { +func GetKubeletRunningPods(c clientset.Interface, node string) (*api.PodList, error) { return getKubeletPods(c, node, "runningpods") } -func getKubeletPods(c *client.Client, node, resource string) (*api.PodList, error) { +func getKubeletPods(c clientset.Interface, node, resource string) (*api.PodList, error) { result := &api.PodList{} client, err := NodeProxyRequest(c, node, resource) if err != nil { @@ -4283,7 +4291,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { RestartPolicy: api.RestartPolicyNever, }, } - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) @@ -4315,15 +4323,15 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeo RestartPolicy: api.RestartPolicyNever, }, } - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := podClient.Create(pod) if err != nil { return err } - err = WaitForPodSuccessInNamespace(f.Client, podName, f.Namespace.Name) + err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) if err != nil { - logs, logErr := GetPodLogs(f.Client, f.Namespace.Name, pod.Name, contName) + logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName) if logErr != nil { Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr) } else { @@ -4345,14 +4353,14 @@ func CoreDump(dir string) { } } -func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*api.Pod)) (*api.Pod, error) { +func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*api.Pod)) (*api.Pod, error) { for i := 0; i < 3; i++ { - pod, err := client.Pods(ns).Get(name) + pod, err := client.Core().Pods(ns).Get(name) if err != nil { return nil, fmt.Errorf("Failed to get pod %q: %v", name, err) } update(pod) - pod, err = client.Pods(ns).Update(pod) + pod, err = client.Core().Pods(ns).Update(pod) if err == nil { return pod, nil } @@ -4363,8 +4371,8 @@ func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*a return nil, fmt.Errorf("Too many retries updating Pod %q", name) } -func GetPodsInNamespace(c *client.Client, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) { - pods, err := c.Pods(ns).List(api.ListOptions{}) +func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) { + pods, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { return []*api.Pod{}, err } @@ -4441,11 +4449,11 @@ func GetPodsScheduled(masterNodes sets.String, pods *api.PodList) (scheduledPods } // WaitForStableCluster waits until all existing pods are scheduled and returns their amount. -func WaitForStableCluster(c *client.Client, masterNodes sets.String) int { +func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute startTime := time.Now() - allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) + allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) ExpectNoError(err) // API server returns also Pods that succeeded. We need to filter them out. 
currentPods := make([]api.Pod, 0, len(allPods.Items)) @@ -4460,7 +4468,7 @@ func WaitForStableCluster(c *client.Client, masterNodes sets.String) int { for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) - allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) + allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) @@ -4473,10 +4481,10 @@ func WaitForStableCluster(c *client.Client, masterNodes sets.String) int { } // GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes -func GetMasterAndWorkerNodesOrDie(c *client.Client) (sets.String, *api.NodeList) { +func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *api.NodeList) { nodes := &api.NodeList{} masters := sets.NewString() - all, _ := c.Nodes().List(api.ListOptions{}) + all, _ := c.Core().Nodes().List(api.ListOptions{}) for _, n := range all.Items { if system.IsMasterNode(&n) { masters.Insert(n.Name) @@ -4504,8 +4512,8 @@ func CreateFileForGoBinData(gobindataPath, outputFilename string) error { return nil } -func ListNamespaceEvents(c *client.Client, ns string) error { - ls, err := c.Events(ns).List(api.ListOptions{}) +func ListNamespaceEvents(c clientset.Interface, ns string) error { + ls, err := c.Core().Events(ns).List(api.ListOptions{}) if err != nil { return err } diff --git a/test/e2e/garbage_collector.go b/test/e2e/garbage_collector.go index 9ef0ff0fabf..6952b0d28ea 100644 --- a/test/e2e/garbage_collector.go +++ b/test/e2e/garbage_collector.go @@ -100,7 +100,7 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interfac func gatherMetrics(f *framework.Framework) { By("Gathering metrics") var summary framework.TestDataSummary - grabber, err := metrics.NewMetricsGrabber(f.Client, false, false, true, false) + grabber, err := metrics.NewMetricsGrabber(f.ClientSet, false, false, true, false) if err != nil { framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { diff --git a/test/e2e/ha_master.go b/test/e2e/ha_master.go index 62818b71981..b9fd7bcefb7 100644 --- a/test/e2e/ha_master.go +++ b/test/e2e/ha_master.go @@ -19,12 +19,13 @@ package e2e import ( "bytes" "fmt" - . "github.com/onsi/ginkgo" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/test/e2e/framework" "os/exec" "path" "strconv" + + . 
"github.com/onsi/ginkgo" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/test/e2e/framework" ) func addMasterReplica() error { @@ -47,13 +48,13 @@ func removeMasterReplica() error { return nil } -func verifyRCs(c *client.Client, ns string, names []string) { +func verifyRCs(c clientset.Interface, ns string, names []string) { for _, name := range names { framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1)) } } -func createNewRC(c *client.Client, ns string, name string) { +func createNewRC(c clientset.Interface, ns string, name string) { _, err := newRCByName(c, ns, name, 1) framework.ExpectNoError(err) } @@ -77,14 +78,14 @@ func verifyNumberOfMasterReplicas(expected int) { var _ = framework.KubeDescribe("HA-master [Feature:HAMaster]", func() { f := framework.NewDefaultFramework("ha-master") - var c *client.Client + var c clientset.Interface var ns string var additionalReplicas int var existingRCs []string BeforeEach(func() { framework.SkipUnlessProviderIs("gce") - c = f.Client + c = f.ClientSet ns = f.Namespace.Name verifyNumberOfMasterReplicas(1) additionalReplicas = 0 diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go index fe5a41f7f80..90e57f830f5 100644 --- a/test/e2e/horizontal_pod_autoscaling.go +++ b/test/e2e/horizontal_pod_autoscaling.go @@ -191,6 +191,6 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma TargetCPUUtilizationPercentage: &cpu, }, } - _, errHPA := rc.framework.Client.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) + _, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) framework.ExpectNoError(errHPA) } diff --git a/test/e2e/ingress.go b/test/e2e/ingress.go index a9e05c6729c..3b65847e481 100644 --- a/test/e2e/ingress.go +++ b/test/e2e/ingress.go @@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() { BeforeEach(func() { f.BeforeEach() - jig = newTestJig(f.Client) + jig = newTestJig(f.ClientSet) ns = f.Namespace.Name }) diff --git a/test/e2e/ingress_utils.go b/test/e2e/ingress_utils.go index 22193fc02c3..247461ec3c0 100644 --- a/test/e2e/ingress_utils.go +++ b/test/e2e/ingress_utils.go @@ -43,7 +43,7 @@ import ( "google.golang.org/api/googleapi" apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -68,7 +68,7 @@ const ( ) type testJig struct { - client *client.Client + client clientset.Interface rootCAs map[string][]byte address string ing *extensions.Ingress @@ -269,7 +269,7 @@ func buildInsecureClient(timeout time.Duration) *http.Client { // createSecret creates a secret containing TLS certificates for the given Ingress. // If a secret with the same name already exists in the namespace of the // Ingress, it's updated. 
-func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) { +func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) { var k, c bytes.Buffer tls := ing.Spec.TLS[0] host = strings.Join(tls.Hosts, ",") @@ -290,14 +290,14 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri }, } var s *api.Secret - if s, err = kubeClient.Secrets(ing.Namespace).Get(tls.SecretName); err == nil { + if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName); err == nil { // TODO: Retry the update. We don't really expect anything to conflict though. framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) s.Data = secret.Data - _, err = kubeClient.Secrets(ing.Namespace).Update(s) + _, err = kubeClient.Core().Secrets(ing.Namespace).Update(s) } else { framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) - _, err = kubeClient.Secrets(ing.Namespace).Create(secret) + _, err = kubeClient.Core().Secrets(ing.Namespace).Create(secret) } return host, cert, key, err } @@ -684,7 +684,7 @@ func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[stri } framework.Logf(fmt.Sprintf("creating" + j.ing.Name + " ingress")) var err error - j.ing, err = j.client.Extensions().Ingress(ns).Create(j.ing) + j.ing, err = j.client.Extensions().Ingresses(ns).Create(j.ing) ExpectNoError(err) } @@ -692,12 +692,12 @@ func (j *testJig) update(update func(ing *extensions.Ingress)) { var err error ns, name := j.ing.Namespace, j.ing.Name for i := 0; i < 3; i++ { - j.ing, err = j.client.Extensions().Ingress(ns).Get(name) + j.ing, err = j.client.Extensions().Ingresses(ns).Get(name) if err != nil { framework.Failf("failed to get ingress %q: %v", name, err) } update(j.ing) - j.ing, err = j.client.Extensions().Ingress(ns).Update(j.ing) + j.ing, err = j.client.Extensions().Ingresses(ns).Update(j.ing) if err == nil { describeIng(j.ing.Namespace) return @@ -732,7 +732,7 @@ func (j *testJig) getRootCA(secretName string) (rootCA []byte) { } func (j *testJig) deleteIngress() { - ExpectNoError(j.client.Extensions().Ingress(j.ing.Namespace).Delete(j.ing.Name, nil)) + ExpectNoError(j.client.Extensions().Ingresses(j.ing.Namespace).Delete(j.ing.Name, nil)) } func (j *testJig) waitForIngress() { @@ -803,7 +803,7 @@ func ingFromManifest(fileName string) *extensions.Ingress { func (cont *GCEIngressController) getL7AddonUID() (string, error) { framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap) - cm, err := cont.c.ConfigMaps(api.NamespaceSystem).Get(uidConfigMap) + cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap) if err != nil { return "", err } @@ -833,11 +833,11 @@ type GCEIngressController struct { staticIPName string rc *api.ReplicationController svc *api.Service - c *client.Client + c clientset.Interface cloud framework.CloudConfig } -func newTestJig(c *client.Client) *testJig { +func newTestJig(c clientset.Interface) *testJig { return &testJig{client: c, rootCAs: map[string][]byte{}} } @@ -846,7 +846,7 @@ type NginxIngressController struct { ns string rc *api.ReplicationController pod *api.Pod - c *client.Client + c clientset.Interface externalIP string } @@ -857,14 +857,14 @@ func (cont *NginxIngressController) init() { framework.Logf("initializing nginx ingress controller") 
framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns)) - rc, err := cont.c.ReplicationControllers(cont.ns).Get("nginx-ingress-controller") + rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller") ExpectNoError(err) cont.rc = rc framework.Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel)) - pods, err := cont.c.Pods(cont.ns).List(api.ListOptions{LabelSelector: sel}) + pods, err := cont.c.Core().Pods(cont.ns).List(api.ListOptions{LabelSelector: sel}) ExpectNoError(err) if len(pods.Items) == 0 { framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) diff --git a/test/e2e/initial_resources.go b/test/e2e/initial_resources.go index 7fa63e90a1e..808e59c42c5 100644 --- a/test/e2e/initial_resources.go +++ b/test/e2e/initial_resources.go @@ -65,8 +65,8 @@ func runPod(f *framework.Framework, name, image string) *api.Pod { }, }, } - createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod) + createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, createdPod)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, createdPod)) return createdPod } diff --git a/test/e2e/job.go b/test/e2e/job.go index 0e50d80229d..05bea4bd323 100644 --- a/test/e2e/job.go +++ b/test/e2e/job.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/apis/batch" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" @@ -50,11 +50,11 @@ var _ = framework.KubeDescribe("Job", func() { It("should run a job to completion when tasks succeed", func() { By("Creating a job") job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) @@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Job", func() { // due to successive failures too likely with a reasonable // test timeout. job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) @@ -87,23 +87,23 @@ var _ = framework.KubeDescribe("Job", func() { // run due to some slowness, 1 in 2^15 chance of happening, // causing test flake. Should be very rare. 
job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) + err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) Expect(err).NotTo(HaveOccurred()) }) It("should keep restarting failed pods", func() { By("Creating a job") job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job shows many failures") err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { - curr, err := getJob(f.Client, f.Namespace.Name, job.Name) + curr, err := getJob(f.ClientSet, f.Namespace.Name, job.Name) if err != nil { return false, err } @@ -116,11 +116,11 @@ var _ = framework.KubeDescribe("Job", func() { endParallelism := int32(2) By("Creating a job") job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") - err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) + err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job up") @@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") - err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) + err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) @@ -141,11 +141,11 @@ var _ = framework.KubeDescribe("Job", func() { endParallelism := int32(1) By("Creating a job") job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == startParallelism") - err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) + err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism) Expect(err).NotTo(HaveOccurred()) By("scale job down") @@ -157,18 +157,18 @@ var _ = framework.KubeDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == endParallelism") - err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) + err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism) Expect(err).NotTo(HaveOccurred()) }) It("should delete a job", func() { By("Creating a job") job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring active pods == parallelism") - err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism) + err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, 
job.Name, parallelism) Expect(err).NotTo(HaveOccurred()) By("delete a job") @@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring job was deleted") - _, err = getJob(f.Client, f.Namespace.Name, job.Name) + _, err = getJob(f.ClientSet, f.Namespace.Name, job.Name) Expect(err).To(HaveOccurred()) Expect(errors.IsNotFound(err)).To(BeTrue()) }) @@ -189,21 +189,21 @@ var _ = framework.KubeDescribe("Job", func() { job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) activeDeadlineSeconds := int64(10) job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - job, err := createJob(f.Client, f.Namespace.Name, job) + job, err := createJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job was failed") - err = waitForJobFail(f.Client, f.Namespace.Name, job.Name, 20*time.Second) + err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second) if err == wait.ErrWaitTimeout { - job, err = getJob(f.Client, f.Namespace.Name, job.Name) + job, err = getJob(f.ClientSet, f.Namespace.Name, job.Name) Expect(err).NotTo(HaveOccurred()) // the job stabilized and won't be synced until modification or full // resync happens, we don't want to wait for the latter so we force // sync modifying it job.Spec.Parallelism = &completions - job, err = updateJob(f.Client, f.Namespace.Name, job) + job, err = updateJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) - err = waitForJobFail(f.Client, f.Namespace.Name, job.Name, jobTimeout) + err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, jobTimeout) } Expect(err).NotTo(HaveOccurred()) }) @@ -218,7 +218,7 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp Spec: batch.JobSpec{ Parallelism: &parallelism, Completions: &completions, - ManualSelector: newBool(true), + ManualSelector: newBool(false), Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ Labels: map[string]string{jobSelectorKey: name}, @@ -272,28 +272,28 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp return job } -func getJob(c *client.Client, ns, name string) (*batch.Job, error) { - return c.Extensions().Jobs(ns).Get(name) +func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) { + return c.Batch().Jobs(ns).Get(name) } -func createJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { - return c.Extensions().Jobs(ns).Create(job) +func createJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) { + return c.Batch().Jobs(ns).Create(job) } -func updateJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { - return c.Extensions().Jobs(ns).Update(job) +func updateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) { + return c.Batch().Jobs(ns).Update(job) } -func deleteJob(c *client.Client, ns, name string) error { - return c.Extensions().Jobs(ns).Delete(name, nil) +func deleteJob(c clientset.Interface, ns, name string) error { + return c.Batch().Jobs(ns).Delete(name, nil) } // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
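Illustrative aside (not part of the patch): the job helpers above change API group as well as client type — Jobs are now addressed through Batch() instead of Extensions(). A compact sketch of the new shape, mirroring createJob/getJob, assuming the clientset and batch imports already in this diff.
// assumes: clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
//          "k8s.io/kubernetes/pkg/apis/batch"
func createAndGetJobSketch(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
	created, err := c.Batch().Jobs(ns).Create(job)
	if err != nil {
		return nil, err
	}
	return c.Batch().Jobs(ns).Get(created.Name)
}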
-func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int32) error { +func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) + pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } @@ -308,9 +308,9 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int } // Wait for job to reach completions. -func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) error { +func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { - curr, err := c.Extensions().Jobs(ns).Get(jobName) + curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { return false, err } @@ -319,9 +319,9 @@ func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) e } // Wait for job fail. -func waitForJobFail(c *client.Client, ns, jobName string, timeout time.Duration) error { +func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { - curr, err := c.Extensions().Jobs(ns).Get(jobName) + curr, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { return false, err } diff --git a/test/e2e/kibana_logging.go b/test/e2e/kibana_logging.go index 52a756047e4..d6db3e5cf35 100644 --- a/test/e2e/kibana_logging.go +++ b/test/e2e/kibana_logging.go @@ -54,7 +54,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // Check for the existence of the Kibana service. By("Checking the Kibana service exists.") - s := f.Client.Services(api.NamespaceSystem) + s := f.ClientSet.Core().Services(api.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. 
var err error @@ -70,17 +70,17 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { By("Checking to make sure the Kibana pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue})) options := api.ListOptions{LabelSelector: label} - pods, err := f.Client.Pods(api.NamespaceSystem).List(options) + pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - err = framework.WaitForPodRunningInNamespace(f.Client, &pod) + err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) Expect(err).NotTo(HaveOccurred()) } By("Checking to make sure we get a response from the Kibana UI.") err = nil for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) if errProxy != nil { framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 89bd934f80e..1a42ed578c1 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -46,7 +46,7 @@ import ( apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/labels" @@ -184,10 +184,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() { forEachPod := func(podFunc func(p api.Pod)) { clusterState().ForEach(podFunc) } - var c *client.Client + var c clientset.Interface var ns string BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) @@ -260,7 +260,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { } It("should create and stop a working application [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(deploymentsVersion, c) + framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery()) defer forEachGBFile(func(contents string) { cleanupKubectlInputs(contents, ns) @@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { WithStdinData("abcd1234"). Exec() ExpectNoError(err) - framework.WaitForPodToDisappear(f.Client, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) + framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) By("running a failing command with --leave-stdin-open") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). 
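Illustrative aside (not part of the patch): the server-version gates no longer take the whole client; they accept just the discovery client, which clientset.Interface exposes through Discovery(). A minimal sketch of the pattern used repeatedly below; deploymentsVersion stands in for whichever version constant the surrounding test passes.
// assumes: clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
//          "k8s.io/kubernetes/test/e2e/framework"
func skipIfServerTooOldSketch(c clientset.Interface) {
	// Old: framework.SkipUnlessServerVersionGTE(deploymentsVersion, c)
	framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
}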
@@ -404,7 +404,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { It("should support inline execution and attach", func() { framework.SkipIfContainerRuntimeIs("rkt") // #23335 - framework.SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery()) nsFlag := fmt.Sprintf("--namespace=%v", ns) @@ -414,7 +414,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { ExecOrDie() Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) - Expect(c.Extensions().Jobs(ns).Delete("run-test", nil)).To(BeNil()) + Expect(c.Batch().Jobs(ns).Delete("run-test", nil)).To(BeNil()) By("executing a command with run and attach without stdin") runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). @@ -422,7 +422,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { ExecOrDie() Expect(runOutput).ToNot(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) - Expect(c.Extensions().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) + Expect(c.Batch().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) By("executing a command with run and attach with stdin with open stdin should remain running") runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). @@ -453,7 +453,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { } Expect(err).To(BeNil()) - Expect(c.Extensions().Jobs(ns).Delete("run-test-3", nil)).To(BeNil()) + Expect(c.Batch().Jobs(ns).Delete("run-test-3", nil)).To(BeNil()) }) It("should support port-forward", func() { @@ -541,10 +541,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.KubeDescribe("Kubectl describe", func() { It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) + framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c.Discovery()) kv, err := framework.KubectlVersion() Expect(err).NotTo(HaveOccurred()) - framework.SkipUnlessServerVersionGTE(kv, c) + framework.SkipUnlessServerVersionGTE(kv, c.Discovery()) controllerJson := readTestFileOrDie(redisControllerFilename) serviceJson := readTestFileOrDie(redisServiceFilename) @@ -610,7 +610,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { // Node // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] output = framework.RunKubectlOrDie("describe", "node", node.Name) @@ -664,7 +664,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) validateService := func(name string, servicePort int, timeout time.Duration) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) { - endpoints, err := c.Endpoints(ns).Get(name) + endpoints, err := c.Core().Endpoints(ns).Get(name) if err != nil { // log the real error framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) @@ -695,7 +695,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) Expect(err).NotTo(HaveOccurred()) - service, err := c.Services(ns).Get(name) + service, err := c.Core().Services(ns).Get(name) Expect(err).NotTo(HaveOccurred()) if len(service.Spec.Ports) != 1 { @@ -773,7 +773,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) It("should be able to retrieve and filter logs [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c) + framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c.Discovery()) // Split("something\n", "\n") returns ["something", ""], so // strip trailing newline first @@ -873,7 +873,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) - gte, err := framework.ServerVersionGTE(deploymentsVersion, c) + gte, err := framework.ServerVersionGTE(deploymentsVersion, c.Discovery()) if err != nil { framework.Failf("Failed to get server version: %v", err) } @@ -924,7 +924,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) By("verifying the rc " + rcName + " was created") - rc, err := c.ReplicationControllers(ns).Get(rcName) + rc, err := c.Core().ReplicationControllers(ns).Get(rcName) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) } @@ -964,10 +964,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.KubeDescribe("Kubectl rolling-update", func() { var nsFlag string var rcName string - var c *client.Client + var c clientset.Interface BeforeEach(func() { - c = f.Client + c = f.ClientSet nsFlag = fmt.Sprintf("--namespace=%v", ns) rcName = "e2e-test-nginx-rc" }) @@ -980,7 +980,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) By("verifying the rc " + rcName + " was created") - rc, err := c.ReplicationControllers(ns).Get(rcName) + rc, err := c.Core().ReplicationControllers(ns).Get(rcName) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) } @@ -1021,7 +1021,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) It("should create a deployment from an image [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(deploymentsVersion, c) + framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery()) By("running the image " + nginxImage) framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) @@ -1063,12 +1063,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) It("should create a job from an image when restart is OnFailure [Conformance]", 
func() { - framework.SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery()) By("running the image " + nginxImage) framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag) By("verifying the job " + jobName + " was created") - job, err := c.Extensions().Jobs(ns).Get(jobName) + job, err := c.Batch().Jobs(ns).Get(jobName) if err != nil { framework.Failf("Failed getting job %s: %v", jobName, err) } @@ -1133,12 +1133,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) It("should create a pod from an image when restart is Never [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery()) By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag) By("verifying the pod " + podName + " was created") - pod, err := c.Pods(ns).Get(podName) + pod, err := c.Core().Pods(ns).Get(podName) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } @@ -1166,7 +1166,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { }) It("should update a single-container pod's image [Conformance]", func() { - framework.SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery()) By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag) @@ -1189,7 +1189,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag) By("verifying the pod " + podName + " has the right image " + busyboxImage) - pod, err := c.Pods(ns).Get(podName) + pod, err := c.Core().Pods(ns).Get(podName) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) } @@ -1208,7 +1208,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { // The rkt runtime doesn't support attach, see #23335 framework.SkipIfContainerRuntimeIs("rkt") - framework.SkipUnlessServerVersionGTE(jobsVersion, c) + framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery()) By("executing a command with run --rm and attach with stdin") t := time.NewTimer(runJobTimeout) @@ -1221,7 +1221,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { Expect(runOutput).To(ContainSubstring("stdin closed")) By("verifying the job " + jobName + " was deleted") - _, err := c.Extensions().Jobs(ns).Get(jobName) + _, err := c.Batch().Jobs(ns).Get(jobName) Expect(err).To(HaveOccurred()) Expect(apierrs.IsNotFound(err)).To(BeTrue()) }) @@ -1286,7 +1286,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { Effect: api.TaintEffectNoSchedule, } - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] nodeName := node.Name @@ -1318,7 +1318,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { Effect: api.TaintEffectNoSchedule, } - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] nodeName := node.Name @@ -1370,7 +1370,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) 
By("verifying that the quota was created") - quota, err := c.ResourceQuotas(ns).Get(quotaName) + quota, err := c.Core().ResourceQuotas(ns).Get(quotaName) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -1400,7 +1400,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) By("verifying that the quota was created") - quota, err := c.ResourceQuotas(ns).Get(quotaName) + quota, err := c.Core().ResourceQuotas(ns).Get(quotaName) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -1517,7 +1517,7 @@ func curl(url string) (string, error) { return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{})) } -func validateGuestbookApp(c *client.Client, ns string) { +func validateGuestbookApp(c clientset.Interface, ns string) { framework.Logf("Waiting for all frontend pods to be Running.") label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) @@ -1539,7 +1539,7 @@ func validateGuestbookApp(c *client.Client, ns string) { } // Returns whether received expected response from guestbook on time. -func waitForGuestbookResponse(c *client.Client, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool { +func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool { for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { res, err := makeRequestToGuestbook(c, cmd, arg, ns) if err == nil && res == expectedResponse { @@ -1550,8 +1550,8 @@ func waitForGuestbookResponse(c *client.Client, cmd, arg, expectedResponse strin return false } -func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (string, error) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) +func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) { + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get()) if errProxy != nil { return "", errProxy } @@ -1609,13 +1609,13 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader { return bytes.NewReader(data) } -func forEachReplicationController(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) { +func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) { var rcs *api.ReplicationControllerList var err error for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := api.ListOptions{LabelSelector: label} - rcs, err = c.ReplicationControllers(ns).List(options) + rcs, err = c.Core().ReplicationControllers(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(rcs.Items) > 0 { break @@ -1646,18 +1646,18 @@ func validateReplicationControllerConfiguration(rc api.ReplicationController) { // getUDData creates a validator function based on the input string (i.e. kitten.jpg). // For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg // in the container's json field. 
-func getUDData(jpgExpected string, ns string) func(*client.Client, string) error { +func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error { // getUDData validates data.json in the update-demo (returns nil if data is ok). - return func(c *client.Client, podID string) error { + return func(c clientset.Interface, podID string) error { framework.Logf("validating pod %s", podID) - subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery()) if err != nil { return err } var body []byte if subResourceProxyAvailable { - body, err = c.Get(). + body, err = c.Core().RESTClient().Get(). Namespace(ns). Resource("pods"). SubResource("proxy"). @@ -1666,7 +1666,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error Do(). Raw() } else { - body, err = c.Get(). + body, err = c.Core().RESTClient().Get(). Prefix("proxy"). Namespace(ns). Resource("pods"). @@ -1692,7 +1692,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error } } -func noOpValidatorFn(c *client.Client, podID string) error { return nil } +func noOpValidatorFn(c clientset.Interface, podID string) error { return nil } // newBlockingReader returns a reader that allows reading the given string, // then blocks until Close() is called on the returned closer. diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go index ede700440d2..0dca8b87b6c 100644 --- a/test/e2e/kubelet.go +++ b/test/e2e/kubelet.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/wait" @@ -44,7 +44,7 @@ const ( // getPodMatches returns a set of pod names on the given node that matches the // podNamePrefix and namespace. -func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String { +func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String { matches := sets.NewString() framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName) runningPods, err := framework.GetKubeletPods(c, nodeName) @@ -68,7 +68,7 @@ func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, name // information; they are reconstructed by examining the container runtime. In // the scope of this test, we do not expect pod naming conflicts so // podNamePrefix should be sufficient to identify the pods. -func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { +func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { return wait.Poll(pollInterval, timeout, func() (bool, error) { matchCh := make(chan sets.String, len(nodeNames)) for _, item := range nodeNames.List() { @@ -95,13 +95,13 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNam // In case a given label already exists, it overwrites it. If label to remove doesn't exist // it silently ignores it. 
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode -func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove map[string]string) { +func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) { const maxRetries = 5 for nodeName := range nodeNames { var node *api.Node var err error for i := 0; i < maxRetries; i++ { - node, err = c.Nodes().Get(nodeName) + node, err = c.Core().Nodes().Get(nodeName) if err != nil { framework.Logf("Error getting node %s: %v", nodeName, err) continue @@ -116,7 +116,7 @@ func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove m delete(node.ObjectMeta.Labels, k) } } - _, err = c.Nodes().Update(node) + _, err = c.Core().Nodes().Update(node) if err != nil { framework.Logf("Error updating node %s: %v", nodeName, err) } else { @@ -128,7 +128,7 @@ func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove m } var _ = framework.KubeDescribe("kubelet", func() { - var c *client.Client + var c clientset.Interface var numNodes int var nodeNames sets.String var nodeLabels map[string]string @@ -136,8 +136,8 @@ var _ = framework.KubeDescribe("kubelet", func() { var resourceMonitor *framework.ResourceMonitor BeforeEach(func() { - c = f.Client - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + c = f.ClientSet + nodes := framework.GetReadySchedulableNodesOrDie(c) numNodes = len(nodes.Items) nodeNames = sets.NewString() // If there are a lot of nodes, we don't want to use all of them @@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("kubelet", func() { // Start resourceMonitor only in small clusters. if len(nodes.Items) <= maxNodesToCheck { - resourceMonitor = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingInterval) + resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval) resourceMonitor.Start() } }) @@ -188,10 +188,10 @@ var _ = framework.KubeDescribe("kubelet", func() { rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) Expect(framework.RunRC(testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: totalPods, NodeSelector: nodeLabels, })).NotTo(HaveOccurred()) @@ -199,14 +199,14 @@ var _ = framework.KubeDescribe("kubelet", func() { // running on the nodes according to kubelet. The timeout is set to // only 30 seconds here because framework.RunRC already waited for all pods to // transition to the running status. - Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods, + Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, totalPods, time.Second*30)).NotTo(HaveOccurred()) if resourceMonitor != nil { resourceMonitor.LogLatest() } By("Deleting the RC") - framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName) + framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its // cache) and returns a list of running pods. 
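As an aside on the conversion pattern in updateNodeLabels above: with the generated clientset, test helpers take clientset.Interface and reach each resource client through a group accessor such as Core(). A minimal sketch of that accessor chain, with a hypothetical helper name that is not part of this PR:

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// getNodeLabels is illustrative only; it shows the Core().Nodes() accessor
// chain the e2e helpers now use instead of calling c.Nodes() directly.
func getNodeLabels(c clientset.Interface, nodeName string) (map[string]string, error) {
	node, err := c.Core().Nodes().Get(nodeName)
	if err != nil {
		return nil, err
	}
	return node.ObjectMeta.Labels, nil
}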
Some possible causes of @@ -215,7 +215,7 @@ var _ = framework.KubeDescribe("kubelet", func() { // - a bug in graceful termination (if it is enabled) // - docker slow to delete pods (or resource problems causing slowness) start := time.Now() - Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0, + Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, 0, itArg.timeout)).NotTo(HaveOccurred()) framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), time.Since(start)) diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go index 15876588b2d..680efb702f3 100644 --- a/test/e2e/kubelet_perf.go +++ b/test/e2e/kubelet_perf.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/uuid" @@ -50,7 +50,7 @@ type resourceTest struct { memLimits framework.ResourceUsagePerContainer } -func logPodsOnNodes(c *client.Client, nodeNames []string) { +func logPodsOnNodes(c clientset.Interface, nodeNames []string) { for _, n := range nodeNames { podList, err := framework.GetKubeletRunningPods(c, n) if err != nil { @@ -70,10 +70,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames // TODO: Use a more realistic workload Expect(framework.RunRC(testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Name: rcName, Namespace: f.Namespace.Name, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Replicas: totalPods, })).NotTo(HaveOccurred()) @@ -96,18 +96,18 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames } else { time.Sleep(reportingPeriod) } - logPodsOnNodes(f.Client, nodeNames.List()) + logPodsOnNodes(f.ClientSet, nodeNames.List()) } By("Reporting overall resource usage") - logPodsOnNodes(f.Client, nodeNames.List()) + logPodsOnNodes(f.ClientSet, nodeNames.List()) usageSummary, err := rm.GetLatest() Expect(err).NotTo(HaveOccurred()) // TODO(random-liu): Remove the original log when we migrate to new perfdash framework.Logf("%s", rm.FormatResourceUsage(usageSummary)) // Log perf result framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary))) - verifyMemoryLimits(f.Client, expectedMemory, usageSummary) + verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary) cpuSummary := rm.GetCPUSummary() framework.Logf("%s", rm.FormatCPUSummary(cpuSummary)) @@ -116,10 +116,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames verifyCPULimits(expectedCPU, cpuSummary) By("Deleting the RC") - framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName) + framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) } -func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { +func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { if expected == nil { return } @@ -200,16 +200,16 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() { // Wait until image prepull pod has completed so that they wouldn't // affect the runtime cpu usage. Fail the test if prepulling cannot // finish in time. 
- if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil { - framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout) + if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil { + framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout) } nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) } - om = framework.NewRuntimeOperationMonitor(f.Client) - rm = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingPeriod) + om = framework.NewRuntimeOperationMonitor(f.ClientSet) + rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod) rm.Start() }) diff --git a/test/e2e/limit_range.go b/test/e2e/limit_range.go index fc0c5ed8cc9..9c7d1d8674b 100644 --- a/test/e2e/limit_range.go +++ b/test/e2e/limit_range.go @@ -42,11 +42,11 @@ var _ = framework.KubeDescribe("LimitRange", func() { min, max, defaultLimit, defaultRequest, maxLimitRequestRatio) - limitRange, err := f.Client.LimitRanges(f.Namespace.Name).Create(limitRange) + limitRange, err := f.ClientSet.Core().LimitRanges(f.Namespace.Name).Create(limitRange) Expect(err).NotTo(HaveOccurred()) By("Fetching the LimitRange to ensure it has proper values") - limitRange, err = f.Client.LimitRanges(f.Namespace.Name).Get(limitRange.Name) + limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name) expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} err = equalResourceRequirement(expected, actual) @@ -54,11 +54,11 @@ var _ = framework.KubeDescribe("LimitRange", func() { By("Creating a Pod with no resource requirements") pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring Pod has resource requirements applied from LimitRange") - pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) for i := range pod.Spec.Containers { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) @@ -71,11 +71,11 @@ var _ = framework.KubeDescribe("LimitRange", func() { By("Creating a Pod with partial resource requirements") pod = newTestPod(f, "pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", "")) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring Pod has merged resource requirements applied from LimitRange") - pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) // This is an interesting case, so it's worth a comment // If you specify a Limit, and no Request, the Limit will default to 
the Request @@ -92,12 +92,12 @@ var _ = framework.KubeDescribe("LimitRange", func() { By("Failing to create a Pod with less than min resources") pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) By("Failing to create a Pod with more than max resources") pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) }) @@ -176,7 +176,7 @@ func newTestPod(f *framework.Framework, name string, requests api.ResourceList, Containers: []api.Container{ { Name: "pause", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Resources: api.ResourceRequirements{ Requests: requests, Limits: limits, diff --git a/test/e2e/load.go b/test/e2e/load.go index 12442518659..55ce1110b10 100644 --- a/test/e2e/load.go +++ b/test/e2e/load.go @@ -29,10 +29,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/transport" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" utilnet "k8s.io/kubernetes/pkg/util/net" @@ -64,7 +62,7 @@ const ( // To run this suite you must explicitly ask for it by setting the // -t/--test flag or ginkgo.focus flag. var _ = framework.KubeDescribe("Load capacity", func() { - var c *client.Client + var clientset internalclientset.Interface var nodeCount int var ns string var configs []*testutils.RCConfig @@ -74,7 +72,7 @@ var _ = framework.KubeDescribe("Load capacity", func() { // TODO add flag that allows to skip cleanup on failure AfterEach(func() { // Verify latency metrics - highLatencyRequests, err := framework.HighLatencyRequests(c) + highLatencyRequests, err := framework.HighLatencyRequests(clientset) framework.ExpectNoError(err, "Too many instances metrics above the threshold") Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) }) @@ -99,25 +97,25 @@ var _ = framework.KubeDescribe("Load capacity", func() { f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { - c = f.Client + clientset = f.ClientSet // In large clusters we may get to this point but still have a bunch // of nodes without Routes created. Since this would make a node // unschedulable, we need to wait until all of them are schedulable. - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c)) + framework.ExpectNoError(framework.WaitForAllNodesSchedulable(clientset)) ns = f.Namespace.Name - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes := framework.GetReadySchedulableNodesOrDie(clientset) nodeCount = len(nodes.Items) Expect(nodeCount).NotTo(BeZero()) // Terminating a namespace (deleting the remaining objects from it - which // generally means events) can affect the current run. Thus we wait for all // terminating namespace to be finally deleted before starting this test. 
- err := framework.CheckTestingNSDeletedExcept(c, ns) + err := framework.CheckTestingNSDeletedExcept(clientset, ns) framework.ExpectNoError(err) - framework.ExpectNoError(framework.ResetMetrics(c)) + framework.ExpectNoError(framework.ResetMetrics(clientset)) }) type Load struct { @@ -153,7 +151,7 @@ var _ = framework.KubeDescribe("Load capacity", func() { framework.Logf("Creating services") services := generateServicesForConfigs(configs) for _, service := range services { - _, err := c.Services(service.Namespace).Create(service) + _, err := clientset.Core().Services(service.Namespace).Create(service) framework.ExpectNoError(err) } framework.Logf("%v Services created.", len(services)) @@ -203,7 +201,7 @@ var _ = framework.KubeDescribe("Load capacity", func() { if createServices == "true" { framework.Logf("Starting to delete services...") for _, service := range services { - err := c.Services(ns).Delete(service.Name) + err := clientset.Core().Services(ns).Delete(service.Name, nil) framework.ExpectNoError(err) } framework.Logf("Services deleted") @@ -223,8 +221,8 @@ func createNamespaces(f *framework.Framework, nodeCount, podsPerNode int) []*api return namespaces } -func createClients(numberOfClients int) ([]*client.Client, error) { - clients := make([]*client.Client, numberOfClients) +func createClients(numberOfClients int) ([]*internalclientset.Clientset, error) { + clients := make([]*internalclientset.Clientset, numberOfClients) for i := 0; i < numberOfClients; i++ { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred()) @@ -260,7 +258,7 @@ func createClients(numberOfClients int) ([]*client.Client, error) { // Transport field. config.TLSClientConfig = restclient.TLSClientConfig{} - c, err := client.New(config) + c, err := internalclientset.NewForConfig(config) if err != nil { return nil, err } @@ -385,14 +383,14 @@ func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Du sleepUpTo(scalingTime) newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2) - framework.ExpectNoError(framework.ScaleRC(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name, newSize, true), + framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true), fmt.Sprintf("scaling rc %s for the first time", config.Name)) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) options := api.ListOptions{ LabelSelector: selector, ResourceVersion: "0", } - _, err := config.Client.Pods(config.Namespace).List(options) + _, err := config.Client.Core().Pods(config.Namespace).List(options) framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name)) } @@ -413,17 +411,6 @@ func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time. 
if framework.TestContext.GarbageCollectorEnabled { framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) } else { - framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) + framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) } } - -// coreClientSetFromUnversioned adapts just enough of a a unversioned.Client to work with the scale RC function -func coreClientSetFromUnversioned(c *client.Client) internalclientset.Interface { - var clientset internalclientset.Clientset - if c != nil { - clientset.CoreClient = unversionedcore.New(c.RESTClient) - } else { - clientset.CoreClient = unversionedcore.New(nil) - } - return &clientset -} diff --git a/test/e2e/logging_soak.go b/test/e2e/logging_soak.go index efec4ae2a79..87f1647011c 100644 --- a/test/e2e/logging_soak.go +++ b/test/e2e/logging_soak.go @@ -18,14 +18,15 @@ package e2e import ( "fmt" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/test/e2e/framework" "strconv" "strings" "sync" "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/test/e2e/framework" ) var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() { diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go index fbe84ca4024..1ffb07d0af2 100644 --- a/test/e2e/mesos.go +++ b/test/e2e/mesos.go @@ -21,7 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -32,17 +32,17 @@ import ( var _ = framework.KubeDescribe("Mesos", func() { f := framework.NewDefaultFramework("pods") - var c *client.Client + var c clientset.Interface var ns string BeforeEach(func() { framework.SkipUnlessProviderIs("mesos/docker") - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) It("applies slave attributes as labels", func() { - nodeClient := f.Client.Nodes() + nodeClient := f.ClientSet.Core().Nodes() rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) options := api.ListOptions{LabelSelector: rackA} @@ -62,11 +62,10 @@ var _ = framework.KubeDescribe("Mesos", func() { }) It("starts static pods on every node in the mesos cluster", func() { - client := f.Client + client := f.ClientSet framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready") - nodelist := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - + nodelist := framework.GetReadySchedulableNodesOrDie(client) const ns = "static-pods" numpods := int32(len(nodelist.Items)) framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}), @@ -80,7 +79,7 @@ var _ = framework.KubeDescribe("Mesos", func() { // scheduled onto it. 
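For reference, the construction pattern that createClients switches to above looks roughly like the following; the wrapper name is hypothetical and the config is assumed to come from framework.LoadConfig or restclient.InClusterConfig:

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newClientsetOrDie is an illustrative helper, not part of this PR: the old
// client.New(config) call is replaced by the generated constructor, whose
// *Clientset result satisfies internalclientset.Interface.
func newClientsetOrDie(config *restclient.Config) internalclientset.Interface {
	cs, err := internalclientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	return cs
}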
By("Trying to launch a pod with a label to get a node which can launch it.") podName := "with-label" - _, err := c.Pods(ns).Create(&api.Pod{ + _, err := c.Core().Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -94,7 +93,7 @@ var _ = framework.KubeDescribe("Mesos", func() { Containers: []api.Container{ { Name: podName, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), }, }, }, @@ -102,10 +101,10 @@ var _ = framework.KubeDescribe("Mesos", func() { framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + pod, err := c.Core().Pods(ns).Get(podName) framework.ExpectNoError(err) - nodeClient := f.Client.Nodes() + nodeClient := f.ClientSet.Core().Nodes() // schedule onto node with rack=2 being assigned to the "public" role rack2 := labels.SelectorFromSet(map[string]string{ diff --git a/test/e2e/metrics_grabber_test.go b/test/e2e/metrics_grabber_test.go index 93641cafcd4..4e0c39dbe00 100644 --- a/test/e2e/metrics_grabber_test.go +++ b/test/e2e/metrics_grabber_test.go @@ -20,7 +20,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/test/e2e/framework" @@ -30,11 +30,11 @@ import ( var _ = framework.KubeDescribe("MetricsGrabber", func() { f := framework.NewDefaultFramework("metrics-grabber") - var c *client.Client + var c clientset.Interface var grabber *metrics.MetricsGrabber BeforeEach(func() { var err error - c = f.Client + c = f.ClientSet framework.ExpectNoError(err) grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true) framework.ExpectNoError(err) @@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() { It("should grab all metrics from a Scheduler.", func() { By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false @@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() { It("should grab all metrics from a ControllerManager.", func() { By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go index dba00ad57ad..11205d3ff47 100644 --- a/test/e2e/monitoring.go +++ b/test/e2e/monitoring.go @@ -24,7 +24,7 @@ import ( influxdb "github.com/influxdata/influxdb/client" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/test/e2e/framework" @@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Monitoring", func() { }) It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() { - testMonitoringUsingHeapsterInfluxdb(f.Client) + testMonitoringUsingHeapsterInfluxdb(f.ClientSet) }) }) @@ -61,8 +61,8 @@ var ( ) // Query sends a command to the server and returns the Response -func Query(c *client.Client, query string) (*influxdb.Response, error) { - result, err := c.Get(). 
+func Query(c clientset.Interface, query string) (*influxdb.Response, error) { + result, err := c.Core().RESTClient().Get(). Prefix("proxy"). Namespace("kube-system"). Resource("services"). @@ -89,7 +89,7 @@ func Query(c *client.Client, query string) (*influxdb.Response, error) { return &response, nil } -func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) { +func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) { expectedPods := []string{} // Iterate over the labels that identify the replication controllers that we // want to check. The rcLabels contains the value values for the k8s-app key @@ -102,11 +102,11 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error for _, rcLabel := range rcLabels { selector := labels.Set{"k8s-app": rcLabel}.AsSelector() options := api.ListOptions{LabelSelector: selector} - deploymentList, err := c.Deployments(api.NamespaceSystem).List(options) + deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options) if err != nil { return nil, err } - rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(options) + rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -122,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error for _, rc := range rcList.Items { selector := labels.Set(rc.Spec.Selector).AsSelector() options := api.ListOptions{LabelSelector: selector} - podList, err := c.Pods(api.NamespaceSystem).List(options) + podList, err := c.Core().Pods(api.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error for _, rc := range deploymentList.Items { selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector() options := api.ListOptions{LabelSelector: selector} - podList, err := c.Pods(api.NamespaceSystem).List(options) + podList, err := c.Core().Pods(api.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -152,7 +152,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error for _, ps := range psList.Items { selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector() options := api.ListOptions{LabelSelector: selector} - podList, err := c.Pods(api.NamespaceSystem).List(options) + podList, err := c.Core().Pods(api.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -167,8 +167,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error return expectedPods, nil } -func expectedServicesExist(c *client.Client) error { - serviceList, err := c.Services(api.NamespaceSystem).List(api.ListOptions{}) +func expectedServicesExist(c clientset.Interface) error { + serviceList, err := c.Core().Services(api.NamespaceSystem).List(api.ListOptions{}) if err != nil { return err } @@ -185,9 +185,9 @@ func expectedServicesExist(c *client.Client) error { return nil } -func getAllNodesInCluster(c *client.Client) ([]string, error) { +func getAllNodesInCluster(c clientset.Interface) ([]string, error) { // It should be OK to list unschedulable Nodes here. 
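The Query change above shows the general rule for raw HTTP access after the migration: the generated clientset does not embed a request builder, so proxy-style requests go through Core().RESTClient(). A minimal sketch, assuming a hypothetical helper name (the influxdb service:port segment is illustrative, since the Name() call is elided from the hunk above):

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// influxdbQueryRaw is illustrative only; it returns the raw response body of a
// proxied GET against a service in kube-system.
func influxdbQueryRaw(c clientset.Interface, q string) ([]byte, error) {
	return c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name("monitoring-influxdb:api"). // illustrative service:port
		Suffix("query").
		Param("q", q).
		Do().
		Raw()
}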
- nodeList, err := c.Nodes().List(api.ListOptions{}) + nodeList, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { return nil, err } @@ -198,7 +198,7 @@ func getAllNodesInCluster(c *client.Client) ([]string, error) { return result, nil } -func getInfluxdbData(c *client.Client, query string, tag string) (map[string]bool, error) { +func getInfluxdbData(c clientset.Interface, query string, tag string) (map[string]bool, error) { response, err := Query(c, query) if err != nil { return nil, err @@ -232,7 +232,7 @@ func expectedItemsExist(expectedItems []string, actualItems map[string]bool) boo return true } -func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string) bool { +func validatePodsAndNodes(c clientset.Interface, expectedPods, expectedNodes []string) bool { pods, err := getInfluxdbData(c, podlistQuery, "pod_id") if err != nil { // We don't fail the test here because the influxdb service might still not be running. @@ -255,7 +255,7 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string return true } -func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { +func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) { // Check if heapster pods and services are up. expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c) framework.ExpectNoError(err) @@ -279,10 +279,10 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { framework.Failf("monitoring using heapster and influxdb test failed") } -func printDebugInfo(c *client.Client) { +func printDebugInfo(c clientset.Interface) { set := labels.Set{"k8s-app": "heapster"} options := api.ListOptions{LabelSelector: set.AsSelector()} - podList, err := c.Pods(api.NamespaceSystem).List(options) + podList, err := c.Core().Pods(api.NamespaceSystem).List(options) if err != nil { framework.Logf("Error while listing pods %v", err) return diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go index ef290afa538..d2e65901712 100644 --- a/test/e2e/namespace.go +++ b/test/e2e/namespace.go @@ -51,7 +51,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max //Wait 10 seconds, then SEND delete requests for all the namespaces. 
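One signature difference recurs in the namespace, node-problem-detector and persistent-volume hunks: the generated typed clients take an explicit *api.DeleteOptions, so call sites pass nil for the server-side default (or api.NewDeleteOptions(0) for immediate deletion). A minimal sketch with a hypothetical helper name:

import (
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// deleteNamespaceIfPresent is illustrative only and not part of this PR.
func deleteNamespaceIfPresent(c clientset.Interface, name string) error {
	// nil keeps the default grace period; api.NewDeleteOptions(0) would force it to 0.
	err := c.Core().Namespaces().Delete(name, nil)
	if err != nil && !apierrs.IsNotFound(err) {
		return err
	}
	return nil
}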
By("Waiting 10 seconds") time.Sleep(time.Duration(10 * time.Second)) - deleted, err := framework.DeleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */) + deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */) Expect(err).NotTo(HaveOccurred()) Expect(len(deleted)).To(Equal(totalNS)) @@ -60,7 +60,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 - nsList, err := f.Client.Namespaces().List(api.ListOptions{}) + nsList, err := f.ClientSet.Core().Namespaces().List(api.ListOptions{}) if err != nil { return false, err } @@ -85,7 +85,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) By("Waiting for a default service account to be provisioned in namespace") - err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) Expect(err).NotTo(HaveOccurred()) By("Creating a pod in the namespace") @@ -97,26 +97,26 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { Containers: []api.Container{ { Name: "nginx", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), }, }, }, } - pod, err = f.Client.Pods(namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Waiting for the pod to have running status") - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) By("Deleting the namespace") - err = f.Client.Namespaces().Delete(namespace.Name) + err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.Client.Namespaces().Get(namespace.Name) + _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name) if err != nil && errors.IsNotFound(err) { return true, nil } @@ -124,7 +124,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { })) By("Verifying there is no pod in the namespace") - _, err = f.Client.Pods(namespace.Name).Get(pod.Name) + _, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name) Expect(err).To(HaveOccurred()) } @@ -136,7 +136,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) By("Waiting for a default service account to be provisioned in namespace") - err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) + err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name) Expect(err).NotTo(HaveOccurred()) By("Creating a service in the namespace") @@ -157,18 +157,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }}, }, } - service, err = f.Client.Services(namespace.Name).Create(service) + service, err = f.ClientSet.Core().Services(namespace.Name).Create(service) Expect(err).NotTo(HaveOccurred()) By("Deleting the namespace") - err = f.Client.Namespaces().Delete(namespace.Name) + err = 
f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.Client.Namespaces().Get(namespace.Name) + _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name) if err != nil && errors.IsNotFound(err) { return true, nil } @@ -176,7 +176,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { })) By("Verifying there is no service in the namespace") - _, err = f.Client.Services(namespace.Name).Get(service.Name) + _, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name) Expect(err).To(HaveOccurred()) } diff --git a/test/e2e/networking.go b/test/e2e/networking.go index 4251dab3d41..c39c75af48d 100644 --- a/test/e2e/networking.go +++ b/test/e2e/networking.go @@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("Networking", func() { } for _, test := range tests { By(fmt.Sprintf("testing: %s", test.path)) - data, err := f.Client.RESTClient.Get(). + data, err := f.ClientSet.Core().RESTClient().Get(). AbsPath(test.path). DoRaw() if err != nil { diff --git a/test/e2e/node_problem_detector.go b/test/e2e/node_problem_detector.go index 7b4f1eb83b1..7f303fa9c6e 100644 --- a/test/e2e/node_problem_detector.go +++ b/test/e2e/node_problem_detector.go @@ -22,7 +22,8 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/system" @@ -41,11 +42,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { image = "gcr.io/google_containers/node-problem-detector:v0.1" ) f := framework.NewDefaultFramework("node-problem-detector") - var c *client.Client + var c clientset.Interface var uid string var ns, name, configName, eventNamespace string BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name uid = string(uuid.NewUUID()) name = "node-problem-detector-" + uid @@ -116,7 +117,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { ] }` By("Get a non master node to run the pod") - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node = nil for _, n := range nodes.Items { @@ -139,7 +140,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile) Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed()) By("Create config map for the node problem detector") - _, err = c.ConfigMaps(ns).Create(&api.ConfigMap{ + _, err = c.Core().ConfigMaps(ns).Create(&api.ConfigMap{ ObjectMeta: api.ObjectMeta{ Name: configName, }, @@ -147,7 +148,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { }) Expect(err).NotTo(HaveOccurred()) By("Create the node problem detector") - _, err = c.Pods(ns).Create(&api.Pod{ + _, err = c.Core().Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -197,11 +198,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { It("should generate node condition and events for corresponding errors", func() { By("Make sure no events are generated") 
Consistently(func() error { - return verifyNoEvents(c.Events(eventNamespace), eventListOptions) + return verifyNoEvents(c.Core().Events(eventNamespace), eventListOptions) }, pollConsistent, pollInterval).Should(Succeed()) By("Make sure the default node condition is generated") Eventually(func() error { - return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage) + return verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage) }, pollTimeout, pollInterval).Should(Succeed()) num := 3 @@ -209,39 +210,39 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { Expect(framework.IssueSSHCommand(injectCommand(tempMessage, num), framework.TestContext.Provider, node)).To(Succeed()) By(fmt.Sprintf("Wait for %d events generated", num)) Eventually(func() error { - return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) + return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) }, pollTimeout, pollInterval).Should(Succeed()) By(fmt.Sprintf("Make sure only %d events generated", num)) Consistently(func() error { - return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) + return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) }, pollConsistent, pollInterval).Should(Succeed()) By("Make sure the node condition is still false") - Expect(verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)).To(Succeed()) + Expect(verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)).To(Succeed()) By("Inject 1 permanent error") Expect(framework.IssueSSHCommand(injectCommand(permMessage, 1), framework.TestContext.Provider, node)).To(Succeed()) By("Make sure the corresponding node condition is generated") Eventually(func() error { - return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage) + return verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage) }, pollTimeout, pollInterval).Should(Succeed()) By("Make sure no new events are generated") Consistently(func() error { - return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) + return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) }, pollConsistent, pollInterval).Should(Succeed()) }) AfterEach(func() { By("Delete the node problem detector") - c.Pods(ns).Delete(name, api.NewDeleteOptions(0)) + c.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0)) By("Wait for the node problem detector to disappear") Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) By("Delete the config map") - c.ConfigMaps(ns).Delete(configName) + c.Core().ConfigMaps(ns).Delete(configName, nil) By("Clean up the events") - Expect(c.Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed()) + Expect(c.Core().Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed()) By("Clean up the node condition") patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) - c.Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do() + 
c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do() By("Clean up the temporary directory") framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node) }) @@ -249,7 +250,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { }) // verifyEvents verifies there are num specific events generated -func verifyEvents(e client.EventInterface, options api.ListOptions, num int, reason, message string) error { +func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num int, reason, message string) error { events, err := e.List(options) if err != nil { return err @@ -268,7 +269,7 @@ func verifyEvents(e client.EventInterface, options api.ListOptions, num int, rea } // verifyNoEvents verifies there is no event generated -func verifyNoEvents(e client.EventInterface, options api.ListOptions) error { +func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) error { events, err := e.List(options) if err != nil { return err @@ -280,7 +281,7 @@ func verifyNoEvents(e client.EventInterface, options api.ListOptions) error { } // verifyCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked -func verifyCondition(n client.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error { +func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error { node, err := n.Get(nodeName) if err != nil { return err diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go index ec3a489c012..9c95861b640 100644 --- a/test/e2e/nodeoutofdisk.go +++ b/test/e2e/nodeoutofdisk.go @@ -25,7 +25,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -67,16 +66,14 @@ const ( // // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way. var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { - var c *client.Client - var cs clientset.Interface + var c clientset.Interface var unfilledNodeName, recoveredNodeName string f := framework.NewDefaultFramework("node-outofdisk") BeforeEach(func() { - c = f.Client - cs = f.ClientSet + c = f.ClientSet - nodelist := framework.GetReadySchedulableNodesOrDie(cs) + nodelist := framework.GetReadySchedulableNodesOrDie(c) // Skip this test on small clusters. No need to fail since it is not a use // case that any cluster of small size needs to support. 
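The verifyEvents/verifyCondition changes above illustrate the narrower option: helpers that only touch one resource can accept the generated typed interface rather than the whole clientset. Roughly, under the same import alias used in node_problem_detector.go (the helper name is made up for illustration):

import (
	"k8s.io/kubernetes/pkg/api"
	coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// nodeHasCondition is an illustrative sketch, not part of this PR: it only
// needs a NodeInterface, so it takes exactly that.
func nodeHasCondition(nodes coreclientset.NodeInterface, name string, condType api.NodeConditionType) (bool, error) {
	node, err := nodes.Get(name)
	if err != nil {
		return false, err
	}
	for _, cond := range node.Status.Conditions {
		if cond.Type == condType && cond.Status == api.ConditionTrue {
			return true, nil
		}
	}
	return false, nil
}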
@@ -90,7 +87,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu AfterEach(func() { - nodelist := framework.GetReadySchedulableNodesOrDie(cs) + nodelist := framework.GetReadySchedulableNodesOrDie(c) Expect(len(nodelist.Items)).ToNot(BeZero()) for _, node := range nodelist.Items { if unfilledNodeName == node.Name || recoveredNodeName == node.Name { @@ -101,7 +98,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu }) It("runs out of disk space", func() { - unfilledNode, err := c.Nodes().Get(unfilledNodeName) + unfilledNode, err := c.Core().Nodes().Get(unfilledNodeName) framework.ExpectNoError(err) By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name)) @@ -116,7 +113,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99) ns := f.Namespace.Name - podClient := c.Pods(ns) + podClient := c.Core().Pods(ns) By("Creating pods and waiting for all but one pods to be scheduled") @@ -143,7 +140,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu "reason": "FailedScheduling", }.AsSelector() options := api.ListOptions{FieldSelector: selector} - schedEvents, err := c.Events(ns).List(options) + schedEvents, err := c.Core().Events(ns).List(options) framework.ExpectNoError(err) if len(schedEvents.Items) > 0 { @@ -153,7 +150,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu } }) - nodelist := framework.GetReadySchedulableNodesOrDie(cs) + nodelist := framework.GetReadySchedulableNodesOrDie(c) Expect(len(nodelist.Items)).To(BeNumerically(">", 1)) nodeToRecover := nodelist.Items[1] @@ -171,8 +168,8 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu }) // createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU. -func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) { - podClient := c.Pods(ns) +func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) { + podClient := c.Core().Pods(ns) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -200,8 +197,8 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) { // availCpu calculates the available CPU on a given node by subtracting the CPU requested by // all the pods from the total available CPU capacity on the node. -func availCpu(c *client.Client, node *api.Node) (int64, error) { - podClient := c.Pods(api.NamespaceAll) +func availCpu(c clientset.Interface, node *api.Node) (int64, error) { + podClient := c.Core().Pods(api.NamespaceAll) selector := fields.Set{"spec.nodeName": node.Name}.AsSelector() options := api.ListOptions{FieldSelector: selector} @@ -220,10 +217,10 @@ func availCpu(c *client.Client, node *api.Node) (int64, error) { // availSize returns the available disk space on a given node by querying node stats which // is in turn obtained internally from cadvisor. 
-func availSize(c *client.Client, node *api.Node) (uint64, error) { +func availSize(c clientset.Interface, node *api.Node) (uint64, error) { statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource) - res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() + res, err := c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() if err != nil { return 0, fmt.Errorf("error querying cAdvisor API: %v", err) } @@ -238,7 +235,7 @@ func availSize(c *client.Client, node *api.Node) (uint64, error) { // fillDiskSpace fills the available disk space on a given node by creating a large file. The disk // space on the node is filled in such a way that the available space after filling the disk is just // below the lowDiskSpaceThreshold mark. -func fillDiskSpace(c *client.Client, node *api.Node) { +func fillDiskSpace(c clientset.Interface, node *api.Node) { avail, err := availSize(c, node) framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) @@ -259,7 +256,7 @@ func fillDiskSpace(c *client.Client, node *api.Node) { } // recoverDiskSpace recovers disk space, filled by creating a large file, on a given node. -func recoverDiskSpace(c *client.Client, node *api.Node) { +func recoverDiskSpace(c clientset.Interface, node *api.Node) { By(fmt.Sprintf("Recovering disk space on node %s", node.Name)) cmd := "rm -f test.img" framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) diff --git a/test/e2e/pd.go b/test/e2e/pd.go index 3a30ebbedb7..09139a20cf9 100644 --- a/test/e2e/pd.go +++ b/test/e2e/pd.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" - client "k8s.io/kubernetes/pkg/client/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/types" @@ -53,8 +53,8 @@ const ( var _ = framework.KubeDescribe("Pod Disks", func() { var ( - podClient client.PodInterface - nodeClient client.NodeInterface + podClient unversionedcore.PodInterface + nodeClient unversionedcore.NodeInterface host0Name types.NodeName host1Name types.NodeName ) @@ -63,8 +63,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() { BeforeEach(func() { framework.SkipUnlessNodeCountIsAtLeast(2) - podClient = f.Client.Pods(f.Namespace.Name) - nodeClient = f.Client.Nodes() + podClient = f.ClientSet.Core().Pods(f.Namespace.Name) + nodeClient = f.ClientSet.Core().Nodes() nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") @@ -702,7 +702,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) { } func waitForPDInVolumesInUse( - nodeClient client.NodeInterface, + nodeClient unversionedcore.NodeInterface, diskName string, nodeName types.NodeName, timeout time.Duration, diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go index 7f2bae26145..1af45ce823b 100644 --- a/test/e2e/persistent_volumes.go +++ b/test/e2e/persistent_volumes.go @@ -27,16 +27,16 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" - client 
"k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/test/e2e/framework" ) // Delete the nfs-server pod. -func nfsServerPodCleanup(c *client.Client, config VolumeTestConfig) { +func nfsServerPodCleanup(c clientset.Interface, config VolumeTestConfig) { defer GinkgoRecover() - podClient := c.Pods(config.namespace) + podClient := c.Core().Pods(config.namespace) if config.serverImage != "" { podName := config.prefix + "-server" @@ -49,14 +49,14 @@ func nfsServerPodCleanup(c *client.Client, config VolumeTestConfig) { // Delete the PV. Fail test if delete fails. If success the returned PV should // be nil, which prevents the AfterEach from attempting to delete it. -func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) { +func deletePersistentVolume(c clientset.Interface, pv *api.PersistentVolume) (*api.PersistentVolume, error) { if pv == nil { return nil, fmt.Errorf("PV to be deleted is nil") } framework.Logf("Deleting PersistentVolume %v", pv.Name) - err := c.PersistentVolumes().Delete(pv.Name) + err := c.Core().PersistentVolumes().Delete(pv.Name, nil) if err != nil { return pv, fmt.Errorf("Delete() PersistentVolume %v failed: %v", pv.Name, err) } @@ -77,16 +77,16 @@ func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) (*api.Pe // delete is successful the returned pvc should be nil and the pv non-nil. // Note: the pv and pvc are returned back to the It() caller so that the // AfterEach func can delete these objects if they are not nil. -func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { +func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name) - err := c.PersistentVolumeClaims(ns).Delete(pvc.Name) + err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil) if err != nil { return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err) } // Check that the PVC is really deleted. - pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name) + pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name) if err == nil { return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name) } @@ -102,7 +102,7 @@ func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolu } // Examine the pv.ClaimRef and UID. Expect nil values. - pv, err = c.PersistentVolumes().Get(pv.Name) + pv, err = c.Core().PersistentVolumes().Get(pv.Name) if err != nil { return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name) } @@ -115,9 +115,9 @@ func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolu } // create the PV resource. Fails test on error. 
-func createPV(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) { +func createPV(c clientset.Interface, pv *api.PersistentVolume) (*api.PersistentVolume, error) { - pv, err := c.PersistentVolumes().Create(pv) + pv, err := c.Core().PersistentVolumes().Create(pv) if err != nil { return pv, fmt.Errorf("Create PersistentVolume %v failed: %v", pv.Name, err) } @@ -126,9 +126,9 @@ func createPV(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume } // create the PVC resource. Fails test on error. -func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { +func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { - pvc, err := c.PersistentVolumeClaims(ns).Create(pvc) + pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc) if err != nil { return pvc, fmt.Errorf("Create PersistentVolumeClaim %v failed: %v", pvc.Name, err) } @@ -144,7 +144,7 @@ func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*ap // Note: in the pre-bind case the real PVC name, which is generated, is not // known until after the PVC is instantiated. This is why the pvc is created // before the pv. -func createPVCPV(c *client.Client, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { +func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { var bindTo *api.PersistentVolumeClaim var preBindMsg string @@ -187,7 +187,7 @@ func createPVCPV(c *client.Client, serverIP, ns string, preBind bool) (*api.Pers // Note: in the pre-bind case the real PV name, which is generated, is not // known until after the PV is instantiated. This is why the pv is created // before the pvc. -func createPVPVC(c *client.Client, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { +func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { preBindMsg := "" if preBind { @@ -219,7 +219,7 @@ func createPVPVC(c *client.Client, serverIP, ns string, preBind bool) (*api.Pers } // Wait for the pv and pvc to bind to each other. Fail test on errors. -func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error { +func waitOnPVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error { // Wait for newly created PVC to bind to the PV framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) @@ -243,7 +243,7 @@ func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc * // reflect that these resources have been retrieved again (Get). // Note: the pv and pvc are returned back to the It() caller so that the // AfterEach func can delete these objects if they are not nil. 
-func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { +func waitAndValidatePVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { var err error @@ -254,12 +254,12 @@ func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolu // Check that the PersistentVolume.ClaimRef is valid and matches the PVC framework.Logf("Checking PersistentVolume ClaimRef is non-nil") - pv, err = c.PersistentVolumes().Get(pv.Name) + pv, err = c.Core().PersistentVolumes().Get(pv.Name) if err != nil { return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name) } - pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name) + pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name) if err != nil { return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name) } @@ -273,7 +273,7 @@ func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolu } // Test the pod's exitcode to be zero. -func testPodSuccessOrFail(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error { +func testPodSuccessOrFail(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error { By("Pod should terminate with exitcode 0 (success)") @@ -287,10 +287,10 @@ func testPodSuccessOrFail(f *framework.Framework, c *client.Client, ns string, p } // Delete the passed in pod. -func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error { +func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error { framework.Logf("Deleting pod %v", pod.Name) - err := c.Pods(ns).Delete(pod.Name, nil) + err := c.Core().Pods(ns).Delete(pod.Name, nil) if err != nil { return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err) } @@ -303,7 +303,7 @@ func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod // Re-get the pod to double check that it has been deleted; expect err // Note: Get() writes a log error if the pod is not found - _, err = c.Pods(ns).Get(pod.Name) + _, err = c.Core().Pods(ns).Get(pod.Name) if err == nil { return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name) } @@ -316,7 +316,7 @@ func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod } // Create the test pod, wait for (hopefully) success, and then delete the pod. -func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string, claimName string) error { +func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns string, claimName string) error { var errmsg string @@ -326,7 +326,7 @@ func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string, pod := makeWritePod(ns, claimName) // Instantiate pod (Create) - runPod, err := c.Pods(ns).Create(pod) + runPod, err := c.Core().Pods(ns).Create(pod) if err != nil || runPod == nil { name := "" if runPod != nil { @@ -366,7 +366,7 @@ func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string, // these resources have been retrieved again (Get). // Note: the pv and pvc are returned back to the It() caller so that the // AfterEach func can delete these objects if they are not nil. 
-func completeTest(f *framework.Framework, c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { +func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { // 1. verify that the PV and PVC have binded correctly By("Validating the PV-PVC binding") @@ -402,7 +402,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { // global vars for the It() tests below f := framework.NewDefaultFramework("pv") - var c *client.Client + var c clientset.Interface var ns string var NFSconfig VolumeTestConfig var serverIP string @@ -421,7 +421,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { } BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name // If it doesn't exist, create the nfs server pod in "default" ns @@ -439,7 +439,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { if pvc != nil && len(pvc.Name) > 0 { // Delete the PersistentVolumeClaim framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name) - err := c.PersistentVolumeClaims(ns).Delete(pvc.Name) + err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil) if err != nil && !apierrs.IsNotFound(err) { framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err) } @@ -447,7 +447,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() { } if pv != nil && len(pv.Name) > 0 { framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name) - err := c.PersistentVolumes().Delete(pv.Name) + err := c.Core().PersistentVolumes().Delete(pv.Name, nil) if err != nil && !apierrs.IsNotFound(err) { framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err) } diff --git a/test/e2e/petset.go b/test/e2e/petset.go index 795a8912f38..1131fccd8ba 100644 --- a/test/e2e/petset.go +++ b/test/e2e/petset.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/apps" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/petset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { } f := framework.NewFramework("petset", options, nil) var ns string - var c *client.Client + var c clientset.Interface BeforeEach(func() { // PetSet is in alpha, so it's disabled on some platforms. 
We skip this @@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "petsets"}, f.Namespace.Name) } - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) @@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { BeforeEach(func() { By("creating service " + headlessSvcName + " in namespace " + ns) headlessService := createServiceSpec(headlessSvcName, "", true, labels) - _, err := c.Services(ns).Create(headlessService) + _, err := c.Core().Services(ns).Create(headlessService) Expect(err).NotTo(HaveOccurred()) }) @@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() { var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func() { f := framework.NewDefaultFramework("pet-set-recreate") - var c *client.Client + var c clientset.Interface var ns string labels := map[string]string{ @@ -270,9 +270,9 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( framework.SkipUnlessProviderIs("gce", "vagrant") By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) headlessService := createServiceSpec(headlessSvcName, "", true, labels) - _, err := f.Client.Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err) - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) @@ -306,7 +306,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( NodeName: node.Name, }, } - pod, err := f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) By("creating petset with conflicting port in namespace " + f.Namespace.Name) @@ -314,7 +314,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( petContainer := &ps.Spec.Template.Spec.Containers[0] petContainer.Ports = append(petContainer.Ports, conflictingPort) ps.Spec.Template.Spec.NodeName = node.Name - _, err = f.Client.Apps().PetSets(f.Namespace.Name).Create(ps) + _, err = f.ClientSet.Apps().PetSets(f.Namespace.Name).Create(ps) framework.ExpectNoError(err) By("waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) @@ -324,7 +324,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( var initialPetPodUID types.UID By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) - w, err := f.Client.Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName})) + w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName})) framework.ExpectNoError(err) // we need to get UID from pod in any state and wait until pet set controller will remove pod atleast once _, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) { @@ -347,13 +347,13 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( } By("removing pod with conflicting port in namespace " + f.Namespace.Name) - err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) framework.ExpectNoError(err) By("waiting when pet 
pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") // we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until Eventually(func() error { - petPod, err := f.Client.Pods(f.Namespace.Name).Get(petPodName) + petPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(petPodName) if err != nil { return err } @@ -367,8 +367,8 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func( }) }) -func dumpDebugInfo(c *client.Client, ns string) { - pl, _ := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) +func dumpDebugInfo(c clientset.Interface, ns string) { + pl, _ := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) for _, p := range pl.Items { desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns)) framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc) @@ -526,7 +526,7 @@ func petSetFromManifest(fileName, ns string) *apps.PetSet { // petSetTester has all methods required to test a single petset. type petSetTester struct { - c *client.Client + c clientset.Interface } func (p *petSetTester) createPetSet(manifestPath, ns string) *apps.PetSet { @@ -588,7 +588,7 @@ func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) { // pull the name out from an identity mapper. name := fmt.Sprintf("%v-%v", ps.Name, index) noGrace := int64(0) - if err := p.c.Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { + if err := p.c.Core().Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { framework.Failf("Failed to delete pet %v for PetSet %v: %v", name, ps.Name, ps.Namespace, err) } } @@ -646,7 +646,7 @@ func (p *petSetTester) update(ns, name string, update func(ps *apps.PetSet)) { func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList { selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) ExpectNoError(err) - podList, err := p.c.Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector}) + podList, err := p.c.Core().Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector}) ExpectNoError(err) return podList } @@ -735,7 +735,7 @@ func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) { } } -func deleteAllPetSets(c *client.Client, ns string) { +func deleteAllPetSets(c clientset.Interface, ns string) { pst := &petSetTester{c: c} psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) ExpectNoError(err) @@ -759,7 +759,7 @@ func deleteAllPetSets(c *client.Client, ns string) { pvNames := sets.NewString() // TODO: Don't assume all pvcs in the ns belong to a petset pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { - pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) + pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) return false, nil @@ -768,7 +768,7 @@ func deleteAllPetSets(c *client.Client, ns string) { pvNames.Insert(pvc.Spec.VolumeName) // TODO: Double check that there are no pods referencing the pvc framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) - if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil { + if err := 
c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil { return false, nil } } @@ -779,7 +779,7 @@ func deleteAllPetSets(c *client.Client, ns string) { } pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { - pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()}) + pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) return false, nil diff --git a/test/e2e/pod_gc.go b/test/e2e/pod_gc.go index cb4fa4eb82c..91c93b910d3 100644 --- a/test/e2e/pod_gc.go +++ b/test/e2e/pod_gc.go @@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect pod, err := createTerminatingPod(f) pod.ResourceVersion = "" pod.Status.Phase = api.PodFailed - pod, err = f.Client.Pods(f.Namespace.Name).UpdateStatus(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod) if err != nil { framework.Failf("err failing pod: %v", err) } @@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { - pods, err = f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}) + pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}) if err != nil { framework.Logf("Failed to list pod %v", err) return false, nil @@ -96,5 +96,5 @@ func createTerminatingPod(f *framework.Framework) (*api.Pod, error) { }, }, } - return f.Client.Pods(f.Namespace.Name).Create(pod) + return f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) } diff --git a/test/e2e/portforward.go b/test/e2e/portforward.go index ccbc7b68a26..e3e6331ca8e 100644 --- a/test/e2e/portforward.go +++ b/test/e2e/portforward.go @@ -177,14 +177,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("abc", "1", "1", "1") - if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodRunning(pod.Name); err != nil { framework.Failf("Pod did not start running: %v", err) } defer func() { - logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Logf("Error getting pod log: %v", err) } else { @@ -211,7 +211,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() { } By("Verifying logs") - logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Failf("Error retrieving pod logs: %v", err) } @@ -222,14 +222,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() { It("should support a client that connects, sends data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("abc", "10", "10", "100") - if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { 
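The pod garbage collector check above is a generic "poll the pod list through the clientset until a condition holds" loop. A minimal sketch of that polling shape, not part of the patch; waitForPodCount is a hypothetical helper and the 30 second interval is an arbitrary choice.

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodCount polls the namespace through c.Core() until exactly want pods remain.
func waitForPodCount(c clientset.Interface, ns string, want int, timeout time.Duration) error {
	return wait.Poll(30*time.Second, timeout, func() (bool, error) {
		pods, err := c.Core().Pods(ns).List(api.ListOptions{})
		if err != nil {
			framework.Logf("Failed to list pods: %v", err)
			return false, nil // treat list errors as transient and retry
		}
		return len(pods.Items) == want, nil
	})
}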
framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodRunning(pod.Name); err != nil { framework.Failf("Pod did not start running: %v", err) } defer func() { - logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Logf("Error getting pod log: %v", err) } else { @@ -277,7 +277,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() { } By("Verifying logs") - logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Failf("Error retrieving pod logs: %v", err) } @@ -290,14 +290,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { By("creating the target pod") pod := pfPod("", "10", "10", "100") - if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodRunning(pod.Name); err != nil { framework.Failf("Pod did not start running: %v", err) } defer func() { - logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Logf("Error getting pod log: %v", err) } else { @@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() { } By("Verifying logs") - logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") + logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") if err != nil { framework.Failf("Error retrieving pod logs: %v", err) } diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go index a9a35bee73f..38dd3e5a9e9 100644 --- a/test/e2e/pre_stop.go +++ b/test/e2e/pre_stop.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -34,7 +34,7 @@ type State struct { Received map[string]int } -func testPreStop(c *client.Client, ns string) { +func testPreStop(c clientset.Interface, ns string) { // This is the server that will receive the preStop notification podDescr := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -51,13 +51,13 @@ func testPreStop(c *client.Client, ns string) { }, } By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) - podDescr, err := c.Pods(ns).Create(podDescr) + podDescr, err := c.Core().Pods(ns).Create(podDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. 
defer func() { By("Deleting the server pod") - c.Pods(ns).Delete(podDescr.Name, nil) + c.Core().Pods(ns).Delete(podDescr.Name, nil) }() By("Waiting for pods to come up.") @@ -66,7 +66,7 @@ func testPreStop(c *client.Client, ns string) { val := "{\"Source\": \"prestop\"}" - podOut, err := c.Pods(ns).Get(podDescr.Name) + podOut, err := c.Core().Pods(ns).Get(podDescr.Name) framework.ExpectNoError(err, "getting pod info") preStopDescr := &api.Pod{ @@ -94,7 +94,7 @@ func testPreStop(c *client.Client, ns string) { } By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) - preStopDescr, err = c.Pods(ns).Create(preStopDescr) + preStopDescr, err = c.Core().Pods(ns).Create(preStopDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true @@ -102,7 +102,7 @@ func testPreStop(c *client.Client, ns string) { defer func() { if deletePreStop { By("Deleting the tester pod") - c.Pods(ns).Delete(preStopDescr.Name, nil) + c.Core().Pods(ns).Delete(preStopDescr.Name, nil) } }() @@ -111,20 +111,20 @@ func testPreStop(c *client.Client, ns string) { // Delete the pod with the preStop handler. By("Deleting pre-stop pod") - if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil { + if err := c.Core().Pods(ns).Delete(preStopDescr.Name, nil); err == nil { deletePreStop = false } framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) // Validate that the server received the web poke. err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { - subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) + subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery()) if err != nil { return false, err } var body []byte if subResourceProxyAvailable { - body, err = c.Get(). + body, err = c.Core().RESTClient().Get(). Namespace(ns). Resource("pods"). SubResource("proxy"). @@ -132,7 +132,7 @@ func testPreStop(c *client.Client, ns string) { Suffix("read"). DoRaw() } else { - body, err = c.Get(). + body, err = c.Core().RESTClient().Get(). Prefix("proxy"). Namespace(ns). Resource("pods"). 
@@ -163,6 +163,6 @@ var _ = framework.KubeDescribe("PreStop", func() { f := framework.NewDefaultFramework("prestop") It("should call prestop when killing a pod [Conformance]", func() { - testPreStop(f.Client, f.Namespace.Name) + testPreStop(f.ClientSet, f.Namespace.Name) }) }) diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go index ece812d6516..bb6e335d771 100644 --- a/test/e2e/proxy.go +++ b/test/e2e/proxy.go @@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("Proxy", func() { It("should proxy through a service and a pod [Conformance]", func() { start := time.Now() labels := map[string]string{"proxy-service-target": "true"} - service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{ + service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ GenerateName: "proxy-service-", }, @@ -109,7 +109,7 @@ var _ = framework.KubeDescribe("Proxy", func() { By("starting an echo server on multiple ports") pods := []*api.Pod{} cfg := testutils.RCConfig{ - Client: f.Client, + Client: f.ClientSet, Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Name: service.Name, Namespace: f.Namespace.Name, @@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("Proxy", func() { CreatedPods: &pods, } Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name) + defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, cfg.Name) Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred()) @@ -260,7 +260,7 @@ var _ = framework.KubeDescribe("Proxy", func() { } if len(errs) != 0 { - body, err := f.Client.Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw() + body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw() if err != nil { framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err) } else { @@ -281,7 +281,7 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod // chance of the things we are talking to being confused for an error // that apiserver would have emitted. start := time.Now() - body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() + body, err = f.ClientSet.Core().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() d = time.Since(start) if len(body) > 0 { framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d) diff --git a/test/e2e/rc.go b/test/e2e/rc.go index e9e4187f91b..8103a617063 100644 --- a/test/e2e/rc.go +++ b/test/e2e/rc.go @@ -57,7 +57,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) { // The source for the Docker containter kubernetes/serve_hostname is // in contrib/for-demos/serve_hostname By(fmt.Sprintf("Creating replication controller %s", name)) - controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ + controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -86,7 +86,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. 
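The cleanup in the hunk that follows drops the extra unversioned-client argument: framework.DeleteRCAndPods now takes only the clientset, the namespace, and the controller name. As a usage sketch, not part of the patch (cleanupRC is a hypothetical wrapper), the deferred cleanup reduces to:

package e2e

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// cleanupRC resizes the replication controller to zero and removes it,
// logging rather than failing on error so it is safe to call from a defer.
func cleanupRC(c clientset.Interface, ns, name string) {
	if err := framework.DeleteRCAndPods(c, ns, name); err != nil {
		framework.Logf("Failed to cleanup replication controller %v: %v.", name, err)
	}
}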
- if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil { + if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() @@ -94,7 +94,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) { // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas) + pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) By("Ensuring each pod is running") @@ -112,7 +112,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) { By("Trying to dial each unique pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second - err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go index a9c7092764e..73b842cac8f 100644 --- a/test/e2e/reboot.go +++ b/test/e2e/reboot.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" @@ -65,7 +64,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // events for the kube-system namespace on failures namespaceName := api.NamespaceSystem By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) - events, err := f.Client.Events(namespaceName).List(api.ListOptions{}) + events, err := f.ClientSet.Core().Events(namespaceName).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, e := range events.Items { @@ -90,32 +89,32 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { It("each node by ordering clean reboot and ensure they function upon restart", func() { // clean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted. - testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &") + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &") }) It("each node by ordering unclean reboot and ensure they function upon restart", func() { // unclean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown. - testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &") + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &") }) It("each node by triggering kernel panic and ensure they function upon restart", func() { // kernel panic // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered. 
- testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &") + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &") }) It("each node by switching off the network interface and ensure they function upon switch on", func() { // switch the network interface off for a while to simulate a network outage // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down. - testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &") + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &") }) It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() { // tell the firewall to drop all inbound packets for a while // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets. // We still accept packages send from localhost to prevent monit from restarting kubelet. - testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+ + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+ " sleep 120 && sudo iptables -D INPUT -j DROP && sudo iptables -D INPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &") }) @@ -123,14 +122,14 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // tell the firewall to drop all outbound packets for a while // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets. // We still accept packages send to localhost to prevent monit from restarting kubelet. - testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+ + testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+ " sleep 120 && sudo iptables -D OUTPUT -j DROP && sudo iptables -D OUTPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &") }) }) -func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) { +func testReboot(c clientset.Interface, rebootCmd string) { // Get all nodes, and kick off the test on each. 
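testReboot previously needed both the unversioned client and a clientset; after this change a single clientset.Interface covers everything the reboot test touches, from listing nodes to reading kube-system pods. A minimal sketch of that single-client shape, not part of the patch; readySystemState is a hypothetical helper.

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// readySystemState lists the cluster's nodes and the kube-system pods through
// one clientset, which is all the reboot test needs now that the unversioned
// client parameter is gone.
func readySystemState(c clientset.Interface) (*api.NodeList, *api.PodList, error) {
	nodes, err := c.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, nil, err
	}
	pods, err := c.Core().Pods(api.NamespaceSystem).List(api.ListOptions{})
	if err != nil {
		return nodes, nil, err
	}
	return nodes, pods, nil
}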
- nodelist := framework.GetReadySchedulableNodesOrDie(cs) + nodelist := framework.GetReadySchedulableNodesOrDie(c) result := make([]bool, len(nodelist.Items)) wg := sync.WaitGroup{} wg.Add(len(nodelist.Items)) @@ -161,7 +160,7 @@ func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) { } } -func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []string, pods []*api.Pod) { +func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*api.Pod) { printFn := func(id, log string, err error, previous bool) { prefix := "Retrieving log for container" if previous { @@ -208,7 +207,7 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s // // It returns true through result only if all of the steps pass; at the first // failed step, it will return false through result and not run the rest. -func rebootNode(c *client.Client, provider, name, rebootCmd string) bool { +func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Setup ns := api.NamespaceSystem ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name)) @@ -216,7 +215,7 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool { // Get the node initially. framework.Logf("Getting %s", name) - node, err := c.Nodes().Get(name) + node, err := c.Core().Nodes().Get(name) if err != nil { framework.Logf("Couldn't get node %s", name) return false diff --git a/test/e2e/replica_set.go b/test/e2e/replica_set.go index defd7dd2d4f..bd8da58d3a8 100644 --- a/test/e2e/replica_set.go +++ b/test/e2e/replica_set.go @@ -57,7 +57,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin // The source for the Docker containter kubernetes/serve_hostname is // in contrib/for-demos/serve_hostname By(fmt.Sprintf("Creating ReplicaSet %s", name)) - rs, err := f.Client.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{ + rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -86,7 +86,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin // Cleanup the ReplicaSet when we are done. defer func() { // Resize the ReplicaSet to zero to get rid of pods. - if err := framework.DeleteReplicaSet(f.Client, f.ClientSet, f.Namespace.Name, rs.Name); err != nil { + if err := framework.DeleteReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name); err != nil { framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err) } }() @@ -94,7 +94,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin // List the pods, making sure we observe all the replicas. 
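Both ServeImageOrFail variants verify their replicas the same way: list the created pods with framework.PodsCreated and then poll each one through the apiserver proxy with framework.PodProxyResponseChecker, passing f.ClientSet in both places. A condensed sketch of that verification step, not part of the patch; verifyPodsServing is a hypothetical name.

package e2e

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// verifyPodsServing waits for the named controller's pods to be created and
// then checks that each unique pod answers through the apiserver proxy.
func verifyPodsServing(c clientset.Interface, ns, name string, replicas int32) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := framework.PodsCreated(c, ns, name, replicas)
	if err != nil {
		return err
	}
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	return wait.Poll(retryInterval, retryTimeout,
		framework.PodProxyResponseChecker(c, ns, label, name, true, pods).CheckAllResponses)
}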
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas) + pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) Expect(err).NotTo(HaveOccurred()) By("Ensuring each pod is running") @@ -113,7 +113,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin By("Trying to dial each unique pod") retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second - err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } diff --git a/test/e2e/rescheduler.go b/test/e2e/rescheduler.go index 963a7c252cc..4d9de3626ee 100644 --- a/test/e2e/rescheduler.go +++ b/test/e2e/rescheduler.go @@ -49,21 +49,21 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() { It("should ensure that critical pod is scheduled in case there is no resources available", func() { By("reserving all available cpu") err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores) - defer framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, "reserve-all-cpu") + defer framework.DeleteRCAndPods(f.ClientSet, ns, "reserve-all-cpu") framework.ExpectNoError(err) By("creating a new instance of DNS and waiting for DNS to be scheduled") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) listOpts := api.ListOptions{LabelSelector: label} - rcs, err := f.Client.ReplicationControllers(api.NamespaceSystem).List(listOpts) + rcs, err := f.ClientSet.Core().ReplicationControllers(api.NamespaceSystem).List(listOpts) framework.ExpectNoError(err) Expect(len(rcs.Items)).Should(Equal(1)) rc := rcs.Items[0] replicas := uint(rc.Spec.Replicas) - err = framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true) - defer framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true)) + err = framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true) + defer framework.ExpectNoError(framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true)) framework.ExpectNoError(err) }) }) @@ -73,10 +73,10 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error { replicas := millicores / 100 ReserveCpu(f, id, 1, 100) - framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, f.Namespace.Name, id, uint(replicas), false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.Namespace.Name, id, uint(replicas), false)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { - pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels) + pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels) if err != nil { return err } diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 0bae5a4164a..04f91c6e2b7 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" - client 
"k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/test/e2e/framework" @@ -160,8 +160,8 @@ func svcByName(name string, port int) *api.Service { } } -func newSVCByName(c *client.Client, ns, name string) error { - _, err := c.Services(ns).Create(svcByName(name, testPort)) +func newSVCByName(c clientset.Interface, ns, name string) error { + _, err := c.Core().Services(ns).Create(svcByName(name, testPort)) return err } @@ -187,8 +187,8 @@ func podOnNode(podName, nodeName string, image string) *api.Pod { } } -func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error { - pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage)) +func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { + pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage)) if err == nil { framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { @@ -243,27 +243,27 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str } // newRCByName creates a replication controller with a selector by name of name. -func newRCByName(c *client.Client, ns, name string, replicas int32) (*api.ReplicationController, error) { +func newRCByName(c clientset.Interface, ns, name string, replicas int32) (*api.ReplicationController, error) { By(fmt.Sprintf("creating replication controller %s", name)) - return c.ReplicationControllers(ns).Create(rcByNamePort( + return c.Core().ReplicationControllers(ns).Create(rcByNamePort( name, replicas, serveHostnameImage, 9376, api.ProtocolTCP, map[string]string{})) } -func resizeRC(c *client.Client, ns, name string, replicas int32) error { - rc, err := c.ReplicationControllers(ns).Get(name) +func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { + rc, err := c.Core().ReplicationControllers(ns).Get(name) if err != nil { return err } rc.Spec.Replicas = replicas - _, err = c.ReplicationControllers(rc.Namespace).Update(rc) + _, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc) return err } -func getMaster(c *client.Client) string { +func getMaster(c clientset.Interface) string { master := "" switch framework.TestContext.Provider { case "gce": - eps, err := c.Endpoints(api.NamespaceDefault).Get("kubernetes") + eps, err := c.Core().Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { framework.Failf("Fail to get kubernetes endpoinds: %v", err) } @@ -306,7 +306,7 @@ func getNodeExternalIP(node *api.Node) string { // At the end (even in case of errors), the network traffic is brought back to normal. // This function executes commands on a node so it will work only for some // environments. 
-func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) { +func performTemporaryNetworkFailure(c clientset.Interface, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) { host := getNodeExternalIP(node) master := getMaster(c) By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) @@ -365,13 +365,13 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) { var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { f := framework.NewDefaultFramework("resize-nodes") var systemPodsNo int32 - var c *client.Client + var c clientset.Interface var ns string ignoreLabels := framework.ImagePullerLabels var group string BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels) Expect(err).NotTo(HaveOccurred()) @@ -507,11 +507,11 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := api.ListOptions{LabelSelector: label} - pods, err := c.Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName - node, err := c.Nodes().Get(nodeName) + node, err := c.Core().Nodes().Get(nodeName) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("block network traffic from node %s", node.Name)) @@ -535,7 +535,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { // verify that it is really on the requested node { - pod, err := c.Pods(ns).Get(additionalPod) + pod, err := c.Core().Pods(ns).Get(additionalPod) Expect(err).NotTo(HaveOccurred()) if pod.Spec.NodeName != node.Name { framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) @@ -554,14 +554,14 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { By("choose a node - we will block all network traffic on this node") var podOpts api.ListOptions nodeOpts := api.ListOptions{} - nodes, err := c.Nodes().List(nodeOpts) + nodes, err := c.Core().Nodes().List(nodeOpts) Expect(err).NotTo(HaveOccurred()) framework.FilterNodes(nodes, func(node api.Node) bool { if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) { return false } podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} - pods, err := c.Pods(api.NamespaceAll).List(podOpts) + pods, err := c.Core().Pods(api.NamespaceAll).List(podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -585,11 +585,12 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector - return f.Client.Nodes().List(options) + obj, err := f.ClientSet.Core().Nodes().List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector - return f.Client.Nodes().Watch(options) + return f.ClientSet.Core().Nodes().Watch(options) }, }, &api.Node{}, diff --git a/test/e2e/resource_quota.go b/test/e2e/resource_quota.go index 5dd69eaad99..ccfe7412372 100644 --- a/test/e2e/resource_quota.go +++ b/test/e2e/resource_quota.go @@ -22,7 
+22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -43,13 +43,13 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) @@ -57,40 +57,40 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a Service") service := newTestServiceForQuota("test-service", api.ServiceTypeClusterIP) - service, err = f.Client.Services(f.Namespace.Name).Create(service) + service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures service creation") usedResources = api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[api.ResourceServices] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting a Service") - err = f.Client.Services(f.Namespace.Name).Delete(service.Name) + err = f.ClientSet.Core().Services(f.Namespace.Name).Delete(service.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") usedResources[api.ResourceServices] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) It("should create a ResourceQuota and capture the life of a secret.", func() { By("Discovering how many secrets are in namespace by default") - secrets, err := f.Client.Secrets(f.Namespace.Name).List(api.ListOptions{}) + secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) defaultSecrets := fmt.Sprintf("%d", len(secrets.Items)) hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1) @@ -99,19 +99,19 @@ var _ = 
framework.KubeDescribe("ResourceQuota", func() { quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) resourceQuota.Spec.Hard[api.ResourceSecrets] = resource.MustParse(hardSecrets) - resourceQuota, err = createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a Secret") secret := newTestSecretForQuota("test-secret") - secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret) + secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures secret creation") @@ -119,16 +119,16 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceSecrets] = resource.MustParse(hardSecrets) // we expect there to be two secrets because each namespace will receive // a service account token secret by default - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting a secret") - err = f.Client.Secrets(f.Namespace.Name).Delete(secret.Name) + err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) @@ -136,13 +136,13 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a Pod that fits quota") @@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { requests[api.ResourceCPU] = resource.MustParse("500m") requests[api.ResourceMemory] = resource.MustParse("252Mi") pod := newTestPodForQuota(f, podName, requests, api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) podToUpdate := pod @@ -160,7 +160,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[api.ResourceCPU] = requests[api.ResourceCPU] 
usedResources[api.ResourceMemory] = requests[api.ResourceMemory] - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Not allowing a pod to be created that exceeds remaining quota") @@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { requests[api.ResourceCPU] = resource.MustParse("600m") requests[api.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) By("Ensuring a pod cannot update its resource requirements") @@ -177,15 +177,15 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { requests[api.ResourceCPU] = resource.MustParse("100m") requests[api.ResourceMemory] = resource.MustParse("100Mi") podToUpdate.Spec.Containers[0].Resources.Requests = requests - _, err = f.Client.Pods(f.Namespace.Name).Update(podToUpdate) + _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate) Expect(err).To(HaveOccurred()) By("Ensuring attempts to update pod resource requirements did not change quota usage") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -193,7 +193,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourceCPU] = resource.MustParse("0") usedResources[api.ResourceMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) @@ -201,34 +201,34 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a ConfigMap") configMap := newTestConfigMapForQuota("test-configmap") - configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap) + configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures configMap creation") usedResources = api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[api.ResourceConfigMaps] = resource.MustParse("1") - err = 
waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting a ConfigMap") - err = f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name) + err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") usedResources[api.ResourceConfigMaps] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) @@ -236,34 +236,34 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[api.ResourceReplicationControllers] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a ReplicationController") replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0) - replicationController, err = f.Client.ReplicationControllers(f.Namespace.Name).Create(replicationController) + replicationController, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(replicationController) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures replication controller creation") usedResources = api.ResourceList{} usedResources[api.ResourceReplicationControllers] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting a ReplicationController") - err = f.Client.ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil) + err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") usedResources[api.ResourceReplicationControllers] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) @@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { By("Creating a ResourceQuota") quotaName := "test-quota" resourceQuota := newTestResourceQuota(quotaName) - resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) + resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status is calculated") @@ -279,51 +279,51 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceQuotas] = resource.MustParse("1") 
usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[api.ResourceRequestsStorage] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a PersistentVolumeClaim") pvc := newTestPersistentVolumeClaimForQuota("test-claim") - pvc, err = f.Client.PersistentVolumeClaims(f.Namespace.Name).Create(pvc) + pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures persistent volume claimcreation") usedResources = api.ResourceList{} usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("1") usedResources[api.ResourceRequestsStorage] = resource.MustParse("1Gi") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting a PersistentVolumeClaim") - err = f.Client.PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name) + err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[api.ResourceRequestsStorage] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) Expect(err).NotTo(HaveOccurred()) }) It("should verify ResourceQuota with terminating scopes.", func() { By("Creating a ResourceQuota with terminating scope") quotaTerminatingName := "quota-terminating" - resourceQuotaTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating)) + resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating)) Expect(err).NotTo(HaveOccurred()) By("Ensuring ResourceQuota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a ResourceQuota with not terminating scope") quotaNotTerminatingName := "quota-not-terminating" - resourceQuotaNotTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating)) + resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating)) Expect(err).NotTo(HaveOccurred()) By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a long running pod") @@ -335,7 +335,7 @@ var _ = 
framework.KubeDescribe("ResourceQuota", func() { limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -344,7 +344,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with terminating scope ignored the pod usage") @@ -353,11 +353,11 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -366,7 +366,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a terminating pod") @@ -374,7 +374,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with terminating scope captures the pod usage") @@ -383,7 +383,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with not terminating scope ignored the pod usage") @@ -392,11 +392,11 @@ var _ = framework.KubeDescribe("ResourceQuota", 
func() { usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -405,51 +405,51 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) Expect(err).NotTo(HaveOccurred()) }) It("should verify ResourceQuota with best effort scope.", func() { By("Creating a ResourceQuota with best effort scope") - resourceQuotaBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort)) + resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort)) Expect(err).NotTo(HaveOccurred()) By("Ensuring ResourceQuota status is calculated") usedResources := api.ResourceList{} usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a ResourceQuota with not best effort scope") - resourceQuotaNotBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort)) + resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort)) Expect(err).NotTo(HaveOccurred()) By("Ensuring ResourceQuota status is calculated") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{}) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with best effort scope captures the pod usage") usedResources[api.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource 
quota with not best effort ignored the pod usage") usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Creating a not best-effort pod") @@ -460,26 +460,26 @@ var _ = framework.KubeDescribe("ResourceQuota", func() { limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with not best effort scope captures the pod usage") usedResources[api.ResourcePods] = resource.MustParse("1") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with best effort scope ignored the pod usage") usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") usedResources[api.ResourcePods] = resource.MustParse("0") - err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) + err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) Expect(err).NotTo(HaveOccurred()) }) }) @@ -532,7 +532,7 @@ func newTestPodForQuota(f *framework.Framework, name string, requests api.Resour Containers: []api.Container{ { Name: "pause", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), Resources: api.ResourceRequirements{ Requests: requests, Limits: limits, @@ -633,19 +633,19 @@ func newTestSecretForQuota(name string) *api.Secret { } // createResourceQuota in the specified namespace -func createResourceQuota(c *client.Client, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { - return c.ResourceQuotas(namespace).Create(resourceQuota) +func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { + return 
c.Core().ResourceQuotas(namespace).Create(resourceQuota) } // deleteResourceQuota with the specified name -func deleteResourceQuota(c *client.Client, namespace, name string) error { - return c.ResourceQuotas(namespace).Delete(name) +func deleteResourceQuota(c clientset.Interface, namespace, name string) error { + return c.Core().ResourceQuotas(namespace).Delete(name, nil) } // wait for resource quota status to show the expected used resources value -func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error { +func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used api.ResourceList) error { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { - resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName) + resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName) if err != nil { return false, err } diff --git a/test/e2e/restart.go b/test/e2e/restart.go index 3d06c21ba8e..b72dbc1f6d1 100644 --- a/test/e2e/restart.go +++ b/test/e2e/restart.go @@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { // check must be identical to that call. framework.SkipUnlessProviderIs("gce", "gke") - ps = testutils.NewPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything()) + ps = testutils.NewPodStore(f.ClientSet, api.NamespaceSystem, labels.Everything(), fields.Everything()) }) AfterEach(func() { @@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { nn := framework.TestContext.CloudConfig.NumNodes By("ensuring all nodes are ready") - nodeNamesBefore, err := framework.CheckNodesReady(f.Client, framework.NodeReadyInitialTimeout, nn) + nodeNamesBefore, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn) Expect(err).NotTo(HaveOccurred()) framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore) @@ -89,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { podNamesBefore[i] = p.ObjectMeta.Name } ns := api.NamespaceSystem - if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) { + if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesBefore, framework.PodReadyBeforeTimeout) { framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") } @@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { Expect(err).NotTo(HaveOccurred()) By("ensuring all nodes are ready after the restart") - nodeNamesAfter, err := framework.CheckNodesReady(f.Client, framework.RestartNodeReadyAgainTimeout, nn) + nodeNamesAfter, err := framework.CheckNodesReady(f.ClientSet, framework.RestartNodeReadyAgainTimeout, nn) Expect(err).NotTo(HaveOccurred()) framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter) @@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() { podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), framework.RestartPodReadyAgainTimeout) Expect(err).NotTo(HaveOccurred()) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) - if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) { + if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesAfter, remaining) { framework.Failf("At least one pod wasn't running and ready after the restart.") } }) @@ -156,7 +156,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error { // List old boot 
IDs. oldBootIDs := make(map[string]string) for _, name := range nodeNames { - node, err := f.Client.Nodes().Get(name) + node, err := f.ClientSet.Core().Nodes().Get(name) if err != nil { return fmt.Errorf("error getting node info before reboot: %s", err) } @@ -178,7 +178,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error { // Wait for their boot IDs to change. for _, name := range nodeNames { if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { - node, err := f.Client.Nodes().Get(name) + node, err := f.ClientSet.Core().Nodes().Get(name) if err != nil { return false, fmt.Errorf("error getting node info after reboot: %s", err) } diff --git a/test/e2e/scheduledjob.go b/test/e2e/scheduledjob.go index 998fec1909c..3164e9b435a 100644 --- a/test/e2e/scheduledjob.go +++ b/test/e2e/scheduledjob.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/batch" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/job" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" @@ -53,21 +53,21 @@ var _ = framework.KubeDescribe("ScheduledJob", func() { It("should schedule multiple jobs concurrently", func() { By("Creating a scheduledjob") scheduledJob := newTestScheduledJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, true) - scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob) + scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob) Expect(err).NotTo(HaveOccurred()) By("Ensuring more than one job is running at a time") - err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 2) + err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 2) Expect(err).NotTo(HaveOccurred()) By("Ensuring at least two running jobs exists by listing jobs explicitly") - jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(len(activeJobs) >= 2).To(BeTrue()) By("Removing scheduledjob") - err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) }) @@ -76,20 +76,20 @@ var _ = framework.KubeDescribe("ScheduledJob", func() { By("Creating a suspended scheduledjob") scheduledJob := newTestScheduledJob("suspended", "*/1 * * * ?", batch.AllowConcurrent, true) scheduledJob.Spec.Suspend = newBool(true) - scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob) + scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob) Expect(err).NotTo(HaveOccurred()) By("Ensuring no jobs are scheduled") - err = waitForNoJobs(f.Client, f.Namespace.Name, scheduledJob.Name) + err = waitForNoJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).To(HaveOccurred()) By("Ensuring no job exists by listing jobs explicitly") - jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(jobs.Items).To(HaveLen(0)) By("Removing scheduledjob") - err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + err = 
deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) }) @@ -97,30 +97,30 @@ var _ = framework.KubeDescribe("ScheduledJob", func() { It("should not schedule new jobs when ForbidConcurrent [Slow]", func() { By("Creating a ForbidConcurrent scheduledjob") scheduledJob := newTestScheduledJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true) - scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob) + scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob) Expect(err).NotTo(HaveOccurred()) By("Ensuring a job is scheduled") - err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 1) + err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 1) Expect(err).NotTo(HaveOccurred()) By("Ensuring exactly one is scheduled") - scheduledJob, err = getScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + scheduledJob, err = getScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) Expect(scheduledJob.Status.Active).Should(HaveLen(1)) By("Ensuring exaclty one running job exists by listing jobs explicitly") - jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(activeJobs).To(HaveLen(1)) By("Ensuring no more jobs are scheduled") - err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 2) + err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 2) Expect(err).To(HaveOccurred()) By("Removing scheduledjob") - err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) }) @@ -128,30 +128,30 @@ var _ = framework.KubeDescribe("ScheduledJob", func() { It("should replace jobs when ReplaceConcurrent", func() { By("Creating a ReplaceConcurrent scheduledjob") scheduledJob := newTestScheduledJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent, true) - scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob) + scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob) Expect(err).NotTo(HaveOccurred()) By("Ensuring a job is scheduled") - err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 1) + err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 1) Expect(err).NotTo(HaveOccurred()) By("Ensuring exactly one is scheduled") - scheduledJob, err = getScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + scheduledJob, err = getScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) Expect(scheduledJob.Status.Active).Should(HaveLen(1)) By("Ensuring exaclty one running job exists by listing jobs explicitly") - jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) + jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) activeJobs := filterActiveJobs(jobs) Expect(activeJobs).To(HaveLen(1)) By("Ensuring the job is replaced with a new one") - err = waitForJobReplaced(f.Client, f.Namespace.Name, jobs.Items[0].Name) + err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name) Expect(err).NotTo(HaveOccurred()) By("Removing scheduledjob") - err = deleteScheduledJob(f.Client, 
f.Namespace.Name, scheduledJob.Name) + err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) }) @@ -159,21 +159,21 @@ var _ = framework.KubeDescribe("ScheduledJob", func() { It("should not emit unexpected warnings", func() { By("Creating a scheduledjob") scheduledJob := newTestScheduledJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, false) - scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob) + scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob) Expect(err).NotTo(HaveOccurred()) By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly") - err = waitForJobsAtLeast(f.Client, f.Namespace.Name, 2) + err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2) Expect(err).NotTo(HaveOccurred()) - err = waitForAnyFinishedJob(f.Client, f.Namespace.Name) + err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) By("Ensuring no unexpected event has happened") - err = checkNoUnexpectedEvents(f.Client, f.Namespace.Name, scheduledJob.Name) + err = checkNoUnexpectedEvents(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) By("Removing scheduledjob") - err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name) + err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name) Expect(err).NotTo(HaveOccurred()) }) }) @@ -228,20 +228,20 @@ func newTestScheduledJob(name, schedule string, concurrencyPolicy batch.Concurre return sj } -func createScheduledJob(c *client.Client, ns string, scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) { +func createScheduledJob(c clientset.Interface, ns string, scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) { return c.Batch().ScheduledJobs(ns).Create(scheduledJob) } -func getScheduledJob(c *client.Client, ns, name string) (*batch.ScheduledJob, error) { +func getScheduledJob(c clientset.Interface, ns, name string) (*batch.ScheduledJob, error) { return c.Batch().ScheduledJobs(ns).Get(name) } -func deleteScheduledJob(c *client.Client, ns, name string) error { +func deleteScheduledJob(c clientset.Interface, ns, name string) error { return c.Batch().ScheduledJobs(ns).Delete(name, nil) } // Wait for at least given amount of active jobs. -func waitForActiveJobs(c *client.Client, ns, scheduledJobName string, active int) error { +func waitForActiveJobs(c clientset.Interface, ns, scheduledJobName string, active int) error { return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) { curr, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName) if err != nil { @@ -252,7 +252,7 @@ func waitForActiveJobs(c *client.Client, ns, scheduledJobName string, active int } // Wait for no jobs to appear. -func waitForNoJobs(c *client.Client, ns, jobName string) error { +func waitForNoJobs(c clientset.Interface, ns, jobName string) error { return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) { curr, err := c.Batch().ScheduledJobs(ns).Get(jobName) if err != nil { @@ -264,7 +264,7 @@ func waitForNoJobs(c *client.Client, ns, jobName string) error { } // Wait for a job to be replaced with a new one. 
-func waitForJobReplaced(c *client.Client, ns, previousJobName string) error { +func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) { jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) if err != nil { @@ -281,7 +281,7 @@ func waitForJobReplaced(c *client.Client, ns, previousJobName string) error { } // waitForJobsAtLeast waits for at least a number of jobs to appear. -func waitForJobsAtLeast(c *client.Client, ns string, atLeast int) error { +func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) { jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) if err != nil { @@ -292,7 +292,7 @@ func waitForJobsAtLeast(c *client.Client, ns string, atLeast int) error { } // waitForAnyFinishedJob waits for any completed job to appear. -func waitForAnyFinishedJob(c *client.Client, ns string) error { +func waitForAnyFinishedJob(c clientset.Interface, ns string) error { return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) { jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) if err != nil { @@ -309,12 +309,12 @@ func waitForAnyFinishedJob(c *client.Client, ns string) error { // checkNoUnexpectedEvents checks unexpected events didn't happen. // Currently only "UnexpectedJob" is checked. -func checkNoUnexpectedEvents(c *client.Client, ns, scheduledJobName string) error { +func checkNoUnexpectedEvents(c clientset.Interface, ns, scheduledJobName string) error { sj, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName) if err != nil { return fmt.Errorf("error in getting scheduledjob %s/%s: %v", ns, scheduledJobName, err) } - events, err := c.Events(ns).Search(sj) + events, err := c.Core().Events(ns).Search(sj) if err != nil { return fmt.Errorf("error in listing events: %s", err) } diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index 53b89a1af7c..d0d9b7471cf 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -24,7 +24,6 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" @@ -49,7 +48,6 @@ type pausePodConfig struct { } var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { - var c *client.Client var cs clientset.Interface var nodeList *api.NodeList var systemPodsNo int @@ -60,30 +58,29 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { ignoreLabels := framework.ImagePullerLabels AfterEach(func() { - rc, err := c.ReplicationControllers(ns).Get(RCName) + rc, err := cs.Core().ReplicationControllers(ns).Get(RCName) if err == nil && rc.Spec.Replicas != 0 { By("Cleaning up the replication controller") - err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName) + err := framework.DeleteRCAndPods(f.ClientSet, ns, RCName) framework.ExpectNoError(err) } }) BeforeEach(func() { - c = f.Client cs = f.ClientSet ns = f.Namespace.Name nodeList = &api.NodeList{} - framework.WaitForAllNodesHealthy(c, time.Minute) - masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c) + framework.WaitForAllNodesHealthy(cs, time.Minute) + masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs) - err := 
framework.CheckTestingNSDeletedExcept(c, ns) + err := framework.CheckTestingNSDeletedExcept(cs, ns) framework.ExpectNoError(err) // Every test case in this suite assumes that cluster add-on pods stay stable and // cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. - systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels) + systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels) Expect(err).NotTo(HaveOccurred()) systemPodsNo = 0 for _, pod := range systemPods { @@ -92,12 +89,12 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { } } - err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels) + err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeList.Items { framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) - framework.PrintAllKubeletPods(c, node.Name) + framework.PrintAllKubeletPods(cs, node.Name) } }) @@ -117,7 +114,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { totalPodCapacity += podCapacity.Value() } - currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes) + currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes) podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) @@ -127,7 +124,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // and there is no need to create additional pods. // StartPods requires at least one pod to replicate. if podsNeededForSaturation > 0 { - framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "maxp", + framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp", *initPausePod(f, pausePodConfig{ Name: "", Labels: map[string]string{"name": ""}, @@ -139,7 +136,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { Labels: map[string]string{"name": "additional"}, }) waitForScheduler() - verifyResult(c, podsNeededForSaturation, 1, ns) + verifyResult(cs, podsNeededForSaturation, 1, ns) }) // This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacity. @@ -157,9 +154,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { nodeMaxCapacity = capacity.MilliValue() } } - framework.WaitForStableCluster(c, masterNodes) + framework.WaitForStableCluster(cs, masterNodes) - pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}) + pods, err := cs.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToCapacityMap[pod.Spec.NodeName] @@ -188,7 +185,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // and there is no need to create additional pods. // StartPods requires at least one pod to replicate. 
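The overcommit test above now gathers cluster-wide pod usage with cs.Core().Pods(api.NamespaceAll).List before deciding how many pods are still needed. A sketch of that bookkeeping under the same internal API, with a hypothetical helper name:

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// requestedMilliCPUByNode is a hypothetical helper: list every pod in the cluster
// through the clientset and sum the CPU requests that land on each node.
func requestedMilliCPUByNode(c clientset.Interface) (map[string]int64, error) {
	pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	usage := make(map[string]int64)
	for _, pod := range pods.Items {
		if pod.Spec.NodeName == "" {
			continue // pending pods do not consume node capacity yet
		}
		for _, container := range pod.Spec.Containers {
			if cpu, ok := container.Resources.Requests[api.ResourceCPU]; ok {
				usage[pod.Spec.NodeName] += cpu.MilliValue()
			}
		}
	}
	return usage, nil
}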
if podsNeededForSaturation > 0 { - framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "overcommit", + framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit", *initPausePod(f, pausePodConfig{ Name: "", Labels: map[string]string{"name": ""}, @@ -213,7 +210,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) waitForScheduler() - verifyResult(c, podsNeededForSaturation, 1, ns) + verifyResult(cs, podsNeededForSaturation, 1, ns) }) // Test Nodes does not have any label, hence it should be impossible to schedule Pod with @@ -222,7 +219,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" - framework.WaitForStableCluster(c, masterNodes) + framework.WaitForStableCluster(cs, masterNodes) createPausePod(f, pausePodConfig{ Name: podName, @@ -233,13 +230,13 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }) waitForScheduler() - verifyResult(c, 0, 1, ns) + verifyResult(cs, 0, 1, ns) }) It("validates that a pod with an invalid NodeAffinity is rejected", func() { By("Trying to launch a pod with an invalid Affinity data.") podName := "without-label" - _, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{ + _, err := cs.Core().Pods(ns).Create(initPausePod(f, pausePodConfig{ Name: podName, Affinity: `{ "nodeAffinity": { @@ -285,8 +282,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -297,7 +294,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" - framework.WaitForStableCluster(c, masterNodes) + framework.WaitForStableCluster(cs, masterNodes) createPausePod(f, pausePodConfig{ Name: podName, @@ -326,7 +323,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { Labels: map[string]string{"name": "restricted"}, }) waitForScheduler() - verifyResult(c, 0, 1, ns) + verifyResult(cs, 0, 1, ns) }) // Keep the same steps with the test on NodeSelector, @@ -369,8 +366,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. 
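The invalid-NodeAffinity case above creates the pod directly with cs.Core().Pods(ns).Create and expects the apiserver to reject it; the api/errors package already imported by this file can classify such a rejection. A short sketch under that assumption (the helper name and the IsInvalid check are illustrative, not copied from the test):

package e2e

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// expectRejectedPod is a hypothetical helper: create the pod through the
// clientset and require that the apiserver rejects it as invalid.
func expectRejectedPod(c clientset.Interface, ns string, pod *api.Pod) error {
	_, err := c.Core().Pods(ns).Create(pod)
	if err == nil {
		return fmt.Errorf("pod %q was unexpectedly accepted", pod.Name)
	}
	if !errors.IsInvalid(err) {
		return fmt.Errorf("expected an Invalid error, got: %v", err)
	}
	return nil
}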
- framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -394,8 +391,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, "")) - labelPod, err := c.Pods(ns).Get(pod.Name) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, "")) + labelPod, err := cs.Core().Pods(ns).Get(pod.Name) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -405,7 +402,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() { By("Trying to launch a pod with an invalid pod Affinity data.") podName := "without-label-" + string(uuid.NewUUID()) - _, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{ + _, err := cs.Core().Pods(ns).Create(initPausePod(f, pausePodConfig{ Name: podName, Labels: map[string]string{"name": "without-label"}, Affinity: `{ @@ -439,7 +436,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // Test Nodes does not have any pod, hence it should be impossible to schedule a Pod with pod affinity. It("validates that Inter-pod-Affinity is respected if not matching", func() { By("Trying to schedule Pod with nonempty Pod Affinity.") - framework.WaitForStableCluster(c, masterNodes) + framework.WaitForStableCluster(cs, masterNodes) podName := "without-label-" + string(uuid.NewUUID()) createPausePod(f, pausePodConfig{ Name: podName, @@ -460,7 +457,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }) waitForScheduler() - verifyResult(c, 0, 1, ns) + verifyResult(cs, 0, 1, ns) }) // test the pod affinity successful matching scenario. @@ -500,8 +497,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -514,8 +511,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // cannot be scheduled onto it. 
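Several hunks in this file repeat the same verification: wait for the pod to leave Pending (to avoid the race described in the surrounding comments), then read its binding back through the clientset. A sketch of that pattern, assuming the framework helper signatures shown in this diff (the wrapper name is illustrative):

package e2e

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// scheduledNodeName is a hypothetical wrapper: block until the pod is no longer
// Pending, then fetch it through the clientset and report where it landed.
func scheduledNodeName(c clientset.Interface, ns, podName, resourceVersion string) (string, error) {
	if err := framework.WaitForPodNotPending(c, ns, podName, resourceVersion); err != nil {
		return "", err
	}
	pod, err := c.Core().Pods(ns).Get(podName)
	if err != nil {
		return "", err
	}
	return pod.Spec.NodeName, nil
}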
By("Launching two pods on two distinct nodes to get two node names") CreateHostPortPods(f, "host-port", 2, true) - defer framework.DeleteRCAndPods(c, f.ClientSet, ns, "host-port") - podList, err := c.Pods(ns).List(api.ListOptions{}) + defer framework.DeleteRCAndPods(f.ClientSet, ns, "host-port") + podList, err := cs.Core().Pods(ns).List(api.ListOptions{}) ExpectNoError(err) Expect(len(podList.Items)).To(Equal(2)) nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName} @@ -563,7 +560,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }) waitForScheduler() - verifyResult(c, 3, 1, ns) + verifyResult(cs, 3, 1, ns) }) // test the pod affinity successful matching scenario with multiple Label Operators. @@ -611,8 +608,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -636,8 +633,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(pod.Name) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(pod.Name) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -660,8 +657,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. 
- framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion)) - labelPod, err := c.Pods(ns).Get(pod.Name) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion)) + labelPod, err := cs.Core().Pods(ns).Get(pod.Name) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -679,9 +676,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { Value: "testing-taint-value", Effect: api.TaintEffectNoSchedule, } - framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint) - framework.ExpectNodeHasTaint(c, nodeName, testTaint) - defer framework.RemoveTaintOffNode(c, nodeName, testTaint) + framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) + framework.ExpectNodeHasTaint(cs, nodeName, testTaint) + defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) @@ -712,8 +709,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new taint yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, tolerationPodName, pod.ResourceVersion)) - deployedPod, err := c.Pods(ns).Get(tolerationPodName) + framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName, pod.ResourceVersion)) + deployedPod, err := cs.Core().Pods(ns).Get(tolerationPodName) framework.ExpectNoError(err) Expect(deployedPod.Spec.NodeName).To(Equal(nodeName)) }) @@ -731,9 +728,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { Value: "testing-taint-value", Effect: api.TaintEffectNoSchedule, } - framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint) - framework.ExpectNodeHasTaint(c, nodeName, testTaint) - defer framework.RemoveTaintOffNode(c, nodeName, testTaint) + framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) + framework.ExpectNodeHasTaint(cs, nodeName, testTaint) + defer framework.RemoveTaintOffNode(cs, nodeName, testTaint) By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) @@ -750,13 +747,13 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }) waitForScheduler() - verifyResult(c, 0, 1, ns) + verifyResult(cs, 0, 1, ns) By("Removing taint off the node") - framework.RemoveTaintOffNode(c, nodeName, testTaint) + framework.RemoveTaintOffNode(cs, nodeName, testTaint) waitForScheduler() - verifyResult(c, 1, 0, ns) + verifyResult(cs, 1, 0, ns) }) }) @@ -781,7 +778,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { Containers: []api.Container{ { Name: podName, - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), }, }, }, @@ -793,15 +790,15 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { } func createPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { - pod, err := f.Client.Pods(f.Namespace.Name).Create(initPausePod(f, conf)) + pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf)) framework.ExpectNoError(err) return pod } func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { pod := createPausePod(f, conf) - 
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod)) - pod, err := f.Client.Pods(f.Namespace.Name).Get(conf.Name) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) + pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name) framework.ExpectNoError(err) return pod } @@ -814,7 +811,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string { pod := runPausePod(f, conf) By("Explicitly delete pod here to free the resource it takes.") - err := f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) + err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) framework.ExpectNoError(err) return pod.Spec.NodeName @@ -915,8 +912,8 @@ func waitForScheduler() { } // TODO: upgrade calls in PodAffinity tests when we're able to run them -func verifyResult(c *client.Client, expectedScheduled int, expectedNotScheduled int, ns string) { - allPods, err := c.Pods(ns).List(api.ListOptions{}) +func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { + allPods, err := c.Core().Pods(ns).List(api.ListOptions{}) framework.ExpectNoError(err) scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods) diff --git a/test/e2e/security_context.go b/test/e2e/security_context.go index f7a683013fa..18b26799a76 100644 --- a/test/e2e/security_context.go +++ b/test/e2e/security_context.go @@ -167,11 +167,11 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) } pod.Spec.Containers[0].Command = []string{"sleep", "6000"} - client := f.Client.Pods(f.Namespace.Name) + client := f.ClientSet.Core().Pods(f.Namespace.Name) pod, err := client.Create(pod) framework.ExpectNoError(err, "Error creating pod %v", pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod)) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) testContent := "hello" testFilePath := mountPath + "/TEST" @@ -181,7 +181,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) Expect(err).To(BeNil()) Expect(content).To(ContainSubstring(testContent)) - foundPod, err := f.Client.Pods(f.Namespace.Name).Get(pod.Name) + foundPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) // Confirm that the file can be accessed from a second diff --git a/test/e2e/service.go b/test/e2e/service.go index 8d2dcec844b..2af254e2a70 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -34,7 +34,6 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/service" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" @@ -80,18 +79,16 @@ var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} var _ = framework.KubeDescribe("Services", func() { f := framework.NewDefaultFramework("services") - var c *client.Client var cs clientset.Interface BeforeEach(func() { - c = f.Client cs = f.ClientSet }) // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. 
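In the SELinux test above, the namespaced pod client is captured once as f.ClientSet.Core().Pods(ns) and reused for both the create and the later lookup; the generated clientset hands back a typed, namespace-scoped interface, so the same value serves every call. A small sketch under that assumption (names are illustrative):

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// createAndFetch is a hypothetical helper: capture the namespaced pod interface
// once and reuse it for the create and the follow-up read.
func createAndFetch(c clientset.Interface, ns string, pod *api.Pod) (*api.Pod, error) {
	pods := c.Core().Pods(ns) // typed, namespace-scoped interface
	created, err := pods.Create(pod)
	if err != nil {
		return nil, err
	}
	return pods.Get(created.Name)
}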
It("should provide secure master service [Conformance]", func() { - _, err := c.Services(api.NamespaceDefault).Get("kubernetes") + _, err := cs.Core().Services(api.NamespaceDefault).Get("kubernetes") Expect(err).NotTo(HaveOccurred()) }) @@ -106,7 +103,7 @@ var _ = framework.KubeDescribe("Services", func() { By("creating service " + serviceName + " in namespace " + ns) defer func() { - err := c.Services(ns).Delete(serviceName) + err := cs.Core().Services(ns).Delete(serviceName, nil) Expect(err).NotTo(HaveOccurred()) }() @@ -122,15 +119,15 @@ var _ = framework.KubeDescribe("Services", func() { }}, }, } - _, err := c.Services(ns).Create(service) + _, err := cs.Core().Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) names := map[string]bool{} defer func() { for name := range names { - err := c.Pods(ns).Delete(name, nil) + err := cs.Core().Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } }() @@ -138,21 +135,21 @@ var _ = framework.KubeDescribe("Services", func() { name1 := "pod1" name2 := "pod2" - createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}}) + createPodOrFail(cs, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name1] = true - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}}) - createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}}) + createPodOrFail(cs, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name2] = true - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) - deletePodOrFail(c, ns, name1) + deletePodOrFail(cs, ns, name1) delete(names, name1) - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name2: {80}}) - deletePodOrFail(c, ns, name2) + deletePodOrFail(cs, ns, name2) delete(names, name2) - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) }) It("should serve multiport endpoints from pods [Conformance]", func() { @@ -162,7 +159,7 @@ var _ = framework.KubeDescribe("Services", func() { ns := f.Namespace.Name defer func() { - err := c.Services(ns).Delete(serviceName) + err := cs.Core().Services(ns).Delete(serviceName, nil) Expect(err).NotTo(HaveOccurred()) }() @@ -192,16 +189,16 @@ var _ = framework.KubeDescribe("Services", func() { }, }, } - _, err := c.Services(ns).Create(service) + _, err := cs.Core().Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) port1 := 100 port2 := 101 - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) names := map[string]bool{} defer func() { for name := range names { - err := c.Pods(ns).Delete(name, nil) + err := cs.Core().Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } }() @@ -222,21 +219,21 @@ var _ = framework.KubeDescribe("Services", func() { podname1 := "pod1" podname2 := "pod2" - createPodOrFail(c, ns, podname1, labels, containerPorts1) + createPodOrFail(cs, ns, podname1, labels, containerPorts1) names[podname1] = true - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}}) + 
validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname1: {port1}}) - createPodOrFail(c, ns, podname2, labels, containerPorts2) + createPodOrFail(cs, ns, podname2, labels, containerPorts2) names[podname2] = true - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}}) - deletePodOrFail(c, ns, podname1) + deletePodOrFail(cs, ns, podname1) delete(names, podname1) - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname2: {port2}}) - deletePodOrFail(c, ns, podname2) + deletePodOrFail(cs, ns, podname2) delete(names, podname2) - validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) }) It("should preserve source pod IP for traffic thru service cluster IP", func() { @@ -245,13 +242,13 @@ var _ = framework.KubeDescribe("Services", func() { ns := f.Namespace.Name By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) servicePort := 8080 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) defer func() { framework.Logf("Cleaning up the sourceip test service") - err := c.Services(ns).Delete(serviceName) + err := cs.Core().Services(ns).Delete(serviceName, nil) Expect(err).NotTo(HaveOccurred()) }() serviceIp := tcpService.Spec.ClusterIP @@ -272,20 +269,20 @@ var _ = framework.KubeDescribe("Services", func() { jig.launchEchoserverPodOnNode(f, node1.Name, serverPodName) defer func() { framework.Logf("Cleaning up the echo server pod") - err := c.Pods(ns).Delete(serverPodName, nil) + err := cs.Core().Pods(ns).Delete(serverPodName, nil) Expect(err).NotTo(HaveOccurred()) }() // Waiting for service to expose endpoint. 
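createPodOrFail and deletePodOrFail in the hunks above now receive the clientset; the pods they manage are plain pause pods whose labels select them into the service and whose ports become the endpoints being validated. A rough sketch of such a helper under the same internal API (the helper name is illustrative, and the image would typically come from framework.GetPauseImageName as elsewhere in this diff):

package e2e

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// createLabeledPausePod is a hypothetical stand-in for createPodOrFail: the labels
// select the pod into the service and the ports become its endpoints.
func createLabeledPausePod(c clientset.Interface, ns, name, image string, labels map[string]string, ports []api.ContainerPort) (*api.Pod, error) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: image,
					Ports: ports,
				},
			},
		},
	}
	return c.Core().Pods(ns).Create(pod)
}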
- validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{serverPodName: {servicePort}}) + validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{serverPodName: {servicePort}}) By("Retrieve sourceip from a pod on the same node") - sourceIp1, execPodIp1 := execSourceipTest(f, c, ns, node1.Name, serviceIp, servicePort) + sourceIp1, execPodIp1 := execSourceipTest(f, cs, ns, node1.Name, serviceIp, servicePort) By("Verifying the preserved source ip") Expect(sourceIp1).To(Equal(execPodIp1)) By("Retrieve sourceip from a pod on a different node") - sourceIp2, execPodIp2 := execSourceipTest(f, c, ns, node2.Name, serviceIp, servicePort) + sourceIp2, execPodIp2 := execSourceipTest(f, cs, ns, node2.Name, serviceIp, servicePort) By("Verifying the preserved source ip") Expect(sourceIp2).To(Equal(execPodIp2)) }) @@ -298,13 +295,13 @@ var _ = framework.KubeDescribe("Services", func() { numPods, servicePort := 3, 80 By("creating service1 in namespace " + ns) - podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) + podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) By("creating service2 in namespace " + ns) - podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) + podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - hosts, err := framework.NodeSSHHosts(f.ClientSet) + hosts, err := framework.NodeSSHHosts(cs) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { framework.Failf("No ssh-able nodes") @@ -312,23 +309,23 @@ var _ = framework.KubeDescribe("Services", func() { host := hosts[0] By("verifying service1 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) By("verifying service2 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. By("stopping service1") - framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) + framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1")) By("verifying service1 is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) By("verifying service2 is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
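stopServeHostnameService previously took both the unversioned client and the clientset; after this change a single clientset.Interface is enough, since framework.DeleteRCAndPods and the service delete both accept it. A sketch of that shape (the body is a guess at the helper's intent, not a copy of it):

package e2e

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// stopServeHostnameServiceSketch mirrors the simplified signature: one clientset
// tears down both the backing replication controller and the service itself.
func stopServeHostnameServiceSketch(c clientset.Interface, ns, name string) error {
	if err := framework.DeleteRCAndPods(c, ns, name); err != nil {
		return err
	}
	return c.Core().Services(ns).Delete(name, nil)
}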
By("creating service3 in namespace " + ns) - podNames3, svc3IP, err := startServeHostnameService(c, ns, "service3", servicePort, numPods) + podNames3, svc3IP, err := startServeHostnameService(cs, ns, "service3", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc2IP == svc3IP { @@ -336,10 +333,10 @@ var _ = framework.KubeDescribe("Services", func() { } By("verifying service2 is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By("verifying service3 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) }) It("should work after restarting kube-proxy [Disruptive]", func() { @@ -352,34 +349,34 @@ var _ = framework.KubeDescribe("Services", func() { svc1 := "service1" svc2 := "service2" - defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc1)) }() - podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods) + defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc1)) }() + podNames1, svc1IP, err := startServeHostnameService(cs, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc2)) }() - podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods) + defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc2)) }() + podNames2, svc2IP, err := startServeHostnameService(cs, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } - hosts, err := framework.NodeSSHHosts(f.ClientSet) + hosts, err := framework.NodeSSHHosts(cs) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { framework.Failf("error restarting kube-proxy: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By("Removing iptable rules") result, err := framework.SSH(` @@ -390,8 +387,8 @@ var _ = framework.KubeDescribe("Services", func() { framework.LogSSHResult(result) framework.Failf("couldn't remove iptable rules: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + 
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) It("should work after restarting apiserver [Disruptive]", func() { @@ -401,40 +398,40 @@ var _ = framework.KubeDescribe("Services", func() { ns := f.Namespace.Name numPods, servicePort := 3, 80 - defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) }() - podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) + defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1")) }() + podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - hosts, err := framework.NodeSSHHosts(f.ClientSet) + hosts, err := framework.NodeSSHHosts(cs) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver By("Restarting apiserver") - if err := framework.RestartApiserver(c); err != nil { + if err := framework.RestartApiserver(cs.Discovery()); err != nil { framework.Failf("error restarting apiserver: %v", err) } By("Waiting for apiserver to come up by polling /healthz") - if err := framework.WaitForApiserverUp(c); err != nil { + if err := framework.WaitForApiserverUp(cs); err != nil { framework.Failf("error while waiting for apiserver up: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. 
- defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service2")) }() - podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) + defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service2")) }() + podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) // TODO: Run this test against the userspace proxy and nodes @@ -444,8 +441,8 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-test" ns := f.Namespace.Name - jig := NewServiceTestJig(c, cs, serviceName) - nodeIP := pickNodeIP(jig.ClientSet) // for later + jig := NewServiceTestJig(cs, serviceName) + nodeIP := pickNodeIP(jig.Client) // for later By("creating service " + serviceName + " with type=NodePort in namespace " + ns) service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) { @@ -461,7 +458,7 @@ var _ = framework.KubeDescribe("Services", func() { jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout) By("verifying the node port is locked") - hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) @@ -500,8 +497,8 @@ var _ = framework.KubeDescribe("Services", func() { ns2 := namespacePtr.Name // LB2 in ns2 on UDP framework.Logf("namespace for UDP test: %s", ns2) - jig := NewServiceTestJig(c, cs, serviceName) - nodeIP := pickNodeIP(jig.ClientSet) // for later + jig := NewServiceTestJig(cs, serviceName) + nodeIP := pickNodeIP(jig.Client) // for later // Test TCP and UDP Services. Services with the same name in different // namespaces should get different node ports and load balancers. 
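For reference, the shape of the change these service-test hunks keep repeating, pulled out into one place: helpers now take the generated clientset.Interface instead of the old monolithic *client.Client, every call goes through a typed group client such as Core(), and Delete takes an explicit options argument (nil here). A minimal sketch only; the helper name is made up, and the only thing assumed is the internal clientset import path already used elsewhere in this patch.

package e2esketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// deleteServiceAndPod is illustrative only; it is not a helper added by this patch.
func deleteServiceAndPod(c clientset.Interface, ns, svcName, podName string) error {
	// Old client:  c.Services(ns).Delete(svcName)
	// New client:  typed group accessor plus an explicit delete-options argument.
	if err := c.Core().Services(ns).Delete(svcName, nil); err != nil {
		return err
	}
	return c.Core().Pods(ns).Delete(podName, nil)
}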
@@ -794,7 +791,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeports" ns := f.Namespace.Name - t := NewServerTest(c, ns, serviceName) + t := NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -844,7 +841,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName2 := baseName + "2" ns := f.Namespace.Name - t := NewServerTest(c, ns, serviceName1) + t := NewServerTest(cs, ns, serviceName1) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -896,7 +893,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-range-test" ns := f.Namespace.Name - t := NewServerTest(c, ns, serviceName) + t := NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -935,7 +932,7 @@ var _ = framework.KubeDescribe("Services", func() { } } By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) - result, err := updateService(c, ns, serviceName, func(s *api.Service) { + result, err := updateService(cs, ns, serviceName, func(s *api.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) if err == nil { @@ -964,7 +961,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-reuse" ns := f.Namespace.Name - t := NewServerTest(c, ns, serviceName) + t := NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -999,7 +996,7 @@ var _ = framework.KubeDescribe("Services", func() { err = t.DeleteService(serviceName) Expect(err).NotTo(HaveOccurred()) - hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") + hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! 
ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { @@ -1026,7 +1023,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "never-ready" ns := f.Namespace.Name - t := NewServerTest(c, ns, serviceName) + t := NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -1064,7 +1061,7 @@ var _ = framework.KubeDescribe("Services", func() { svcName := fmt.Sprintf("%v.%v", serviceName, f.Namespace.Name) By("waiting for endpoints of Service with DNS name " + svcName) - execPodName := createExecPodOrFail(f.Client, f.Namespace.Name, "execpod-") + execPodName := createExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-") cmd := fmt.Sprintf("wget -qO- %v", svcName) var stdout string if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { @@ -1085,14 +1082,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", f := framework.NewDefaultFramework("esipp") loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault - var c *client.Client var cs clientset.Interface BeforeEach(func() { // requires cloud load-balancer support - this feature currently supported only on GCE/GKE framework.SkipUnlessProviderIs("gce", "gke") - c = f.Client cs = f.ClientSet if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber { loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge @@ -1102,7 +1097,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", It("should work for type=LoadBalancer [Slow][Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) @@ -1120,7 +1115,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", } break } - Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) + Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() svcTCPPort := int(svc.Spec.Ports[0].Port) @@ -1140,11 +1135,11 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", It("should work for type=NodePort [Slow][Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) svc := jig.createOnlyLocalNodePortService(namespace, serviceName, true) defer func() { - Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) + Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() tcpNodePort := int(svc.Spec.Ports[0].NodePort) @@ -1166,13 +1161,13 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", It("should only target nodes with endpoints [Slow][Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) nodes := jig.getNodes(maxNodesForEndpointsTests) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, 
false) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) + Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) @@ -1218,20 +1213,20 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", // Make sure the loadbalancer picked up the helth check change jig.TestReachableHTTP(ingressIP, svcTCPPort, kubeProxyLagTimeout) } - framework.ExpectNoError(framework.DeleteRCAndPods(c, f.ClientSet, namespace, serviceName)) + framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, namespace, serviceName)) } }) It("should work from pods [Slow][Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) nodes := jig.getNodes(maxNodesForEndpointsTests) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) + Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) @@ -1240,12 +1235,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", podName := "execpod-sourceip" By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) - execPodName := createExecPodOnNode(f.Client, namespace, nodeName, podName) + execPodName := createExecPodOnNode(f.ClientSet, namespace, nodeName, podName) defer func() { - err := c.Pods(namespace).Delete(execPodName, nil) + err := cs.Core().Pods(namespace).Delete(execPodName, nil) Expect(err).NotTo(HaveOccurred()) }() - execPod, err := f.Client.Pods(namespace).Get(execPodName) + execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName) ExpectNoError(err) framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path) @@ -1269,7 +1264,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", It("should handle updates to source ip annotation [Slow][Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(c, cs, serviceName) + jig := NewServiceTestJig(cs, serviceName) nodes := jig.getNodes(maxNodesForEndpointsTests) if len(nodes.Items) < 2 { @@ -1279,7 +1274,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) + Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() // save the health check node port because it disappears when lift the annotation. @@ -1375,18 +1370,18 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", // updateService fetches a service, calls the update function on it, // and then attempts to send the updated service. It retries up to 2 // times in the face of timeouts and conflicts. 
-func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) { +func updateService(c clientset.Interface, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) { var service *api.Service var err error for i := 0; i < 3; i++ { - service, err = c.Services(namespace).Get(serviceName) + service, err = c.Core().Services(namespace).Get(serviceName) if err != nil { return service, err } update(service) - service, err = c.Services(namespace).Update(service) + service, err = c.Core().Services(namespace).Update(service) if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { return service, err @@ -1430,11 +1425,11 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID { type PortsByPodName map[string][]int type PortsByPodUID map[types.UID][]int -func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { +func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { portsByUID := make(PortsByPodUID) for name, portList := range expectedEndpoints { - pod, err := c.Pods(ns).Get(name) + pod, err := c.Core().Pods(ns).Get(name) if err != nil { framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) } @@ -1466,11 +1461,11 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI } } -func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) { +func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) { By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) { - endpoints, err := c.Endpoints(namespace).Get(serviceName) + endpoints, err := c.Core().Endpoints(namespace).Get(serviceName) if err != nil { framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) continue @@ -1494,7 +1489,7 @@ func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, ex i++ } - if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil { + if pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil { for _, pod := range pods.Items { framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } @@ -1529,13 +1524,13 @@ func newExecPodSpec(ns, generateName string) *api.Pod { // createExecPodOrFail creates a simple busybox pod in a sleep loop used as a // vessel for kubectl exec commands. // Returns the name of the created pod. 
-func createExecPodOrFail(client *client.Client, ns, generateName string) string { +func createExecPodOrFail(client clientset.Interface, ns, generateName string) string { framework.Logf("Creating new exec pod") execPod := newExecPodSpec(ns, generateName) - created, err := client.Pods(ns).Create(execPod) + created, err := client.Core().Pods(ns).Create(execPod) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) + retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name) if err != nil { return false, nil } @@ -1547,14 +1542,14 @@ func createExecPodOrFail(client *client.Client, ns, generateName string) string // createExecPodOnNode launches a exec pod in the given namespace and node // waits until it's Running, created pod name would be returned -func createExecPodOnNode(client *client.Client, ns, nodeName, generateName string) string { +func createExecPodOnNode(client clientset.Interface, ns, nodeName, generateName string) string { framework.Logf("Creating exec pod %q in namespace %q", generateName, ns) execPod := newExecPodSpec(ns, generateName) execPod.Spec.NodeName = nodeName - created, err := client.Pods(ns).Create(execPod) + created, err := client.Core().Pods(ns).Create(execPod) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) + retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name) if err != nil { return false, nil } @@ -1564,7 +1559,7 @@ func createExecPodOnNode(client *client.Client, ns, nodeName, generateName strin return created.Name } -func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) { +func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) { By(fmt.Sprintf("creating pod %s in namespace %s", name, ns)) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -1584,13 +1579,13 @@ func createPodOrFail(c *client.Client, ns, name string, labels map[string]string }, }, } - _, err := c.Pods(ns).Create(pod) + _, err := c.Core().Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) } -func deletePodOrFail(c *client.Client, ns, name string) { +func deletePodOrFail(c clientset.Interface, ns, name string) { By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns)) - err := c.Pods(ns).Delete(name, nil) + err := c.Core().Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } @@ -1612,8 +1607,8 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st return ips } -func getNodePublicIps(cs clientset.Interface) ([]string, error) { - nodes := framework.GetReadySchedulableNodesOrDie(cs) +func getNodePublicIps(c clientset.Interface) ([]string, error) { + nodes := framework.GetReadySchedulableNodesOrDie(c) ips := collectAddresses(nodes, api.NodeExternalIP) if len(ips) == 0 { @@ -1622,8 +1617,8 @@ func getNodePublicIps(cs clientset.Interface) ([]string, error) { return ips, nil } -func pickNodeIP(cs clientset.Interface) string { - publicIps, err := getNodePublicIps(cs) +func pickNodeIP(c clientset.Interface) string { + publicIps, err := getNodePublicIps(c) Expect(err).NotTo(HaveOccurred()) if len(publicIps) == 0 { framework.Failf("got unexpected number (%d) of public IPs", len(publicIps)) @@ -1808,11 +1803,11 @@ func 
testNotReachableUDP(ip string, port int, request string) (bool, error) { } // Creates a replication controller that serves its hostname and a service on top of it. -func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) { +func startServeHostnameService(c clientset.Interface, ns, name string, port, replicas int) ([]string, string, error) { podNames := make([]string, replicas) By("creating service " + name + " in namespace " + ns) - _, err := c.Services(ns).Create(&api.Service{ + _, err := c.Core().Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -1858,7 +1853,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas } sort.StringSlice(podNames).Sort() - service, err := c.Services(ns).Get(name) + service, err := c.Core().Services(ns).Get(name) if err != nil { return podNames, "", err } @@ -1869,11 +1864,11 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas return podNames, serviceIP, nil } -func stopServeHostnameService(c *client.Client, clientset clientset.Interface, ns, name string) error { - if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil { +func stopServeHostnameService(clientset clientset.Interface, ns, name string) error { + if err := framework.DeleteRCAndPods(clientset, ns, name); err != nil { return err } - if err := c.Services(ns).Delete(name); err != nil { + if err := clientset.Core().Services(ns).Delete(name, nil); err != nil { return err } return nil @@ -1883,7 +1878,7 @@ func stopServeHostnameService(c *client.Client, clientset clientset.Interface, n // given host and from within a pod. The host is expected to be an SSH-able node // in the cluster. Each pod in the service is expected to echo its name. These // names are compared with the given expectedPods list after a sort | uniq. -func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { +func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { execPodName := createExecPodOrFail(c, ns, "execpod-") defer func() { deletePodOrFail(c, ns, execPodName) @@ -1959,7 +1954,7 @@ func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPod return nil } -func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP string, servicePort int) error { +func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { command := fmt.Sprintf( "curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort) @@ -1994,18 +1989,16 @@ func httpGetNoConnectionPool(url string) (*http.Response, error) { // A test jig to help testing. type ServiceTestJig struct { - ID string - Name string - Client *client.Client - ClientSet clientset.Interface - Labels map[string]string + ID string + Name string + Client clientset.Interface + Labels map[string]string } // NewServiceTestJig allocates and inits a new ServiceTestJig. 
-func NewServiceTestJig(client *client.Client, cs clientset.Interface, name string) *ServiceTestJig { +func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig { j := &ServiceTestJig{} j.Client = client - j.ClientSet = cs j.Name = name j.ID = j.Name + "-" + string(uuid.NewUUID()) j.Labels = map[string]string{"testid": j.ID} @@ -2044,7 +2037,7 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s if tweak != nil { tweak(svc) } - result, err := j.Client.Services(namespace).Create(svc) + result, err := j.Client.Core().Services(namespace).Create(svc) if err != nil { framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) } @@ -2059,7 +2052,7 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc if tweak != nil { tweak(svc) } - result, err := j.Client.Services(namespace).Create(svc) + result, err := j.Client.Core().Services(namespace).Create(svc) if err != nil { framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) } @@ -2074,7 +2067,7 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc if tweak != nil { tweak(svc) } - result, err := j.Client.Services(namespace).Create(svc) + result, err := j.Client.Core().Services(namespace).Create(svc) if err != nil { framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err) } @@ -2145,7 +2138,7 @@ func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceNa // endpoints of the given Service are running. func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string { nodes := j.getNodes(maxNodesForEndpointsTests) - endpoints, err := j.Client.Endpoints(svc.Namespace).Get(svc.Name) + endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name) if err != nil { framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) } @@ -2172,7 +2165,7 @@ func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string // getNodes returns the first maxNodesForTest nodes. Useful in large clusters // where we don't eg: want to create an endpoint per node. func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) { - nodes = framework.GetReadySchedulableNodesOrDie(j.ClientSet) + nodes = framework.GetReadySchedulableNodesOrDie(j.Client) if len(nodes.Items) <= maxNodesForTest { maxNodesForTest = len(nodes.Items) } @@ -2182,7 +2175,7 @@ func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) { func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName string) { err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) { - endpoints, err := j.Client.Endpoints(namespace).Get(serviceName) + endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName) if err != nil { framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) return false, nil @@ -2244,12 +2237,12 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic // face of timeouts and conflicts. 
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) { for i := 0; i < 3; i++ { - service, err := j.Client.Services(namespace).Get(name) + service, err := j.Client.Core().Services(namespace).Get(name) if err != nil { return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) } update(service) - service, err = j.Client.Services(namespace).Update(service) + service, err = j.Client.Core().Services(namespace).Update(service) if err == nil { return service, nil } @@ -2298,7 +2291,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo var service *api.Service framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) pollFunc := func() (bool, error) { - svc, err := j.Client.Services(namespace).Get(name) + svc, err := j.Client.Core().Services(namespace).Get(name) if err != nil { return false, err } @@ -2325,7 +2318,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string var service *api.Service framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name) pollFunc := func() (bool, error) { - svc, err := j.Client.Services(namespace).Get(name) + svc, err := j.Client.Core().Services(namespace).Get(name) if err != nil { return false, err } @@ -2453,7 +2446,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.Replicat if tweak != nil { tweak(rc) } - result, err := j.Client.ReplicationControllers(namespace).Create(rc) + result, err := j.Client.Core().ReplicationControllers(namespace).Create(rc) if err != nil { framework.Failf("Failed to created RC %q: %v", rc.Name, err) } @@ -2474,7 +2467,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := api.ListOptions{LabelSelector: label} - pods, err := j.Client.Pods(namespace).List(options) + pods, err := j.Client.Core().Pods(namespace).List(options) if err != nil { return nil, err } @@ -2507,7 +2500,7 @@ func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error type ServiceTestFixture struct { ServiceName string Namespace string - Client *client.Client + Client clientset.Interface TestId string Labels map[string]string @@ -2518,7 +2511,7 @@ type ServiceTestFixture struct { image string } -func NewServerTest(client *client.Client, namespace string, serviceName string) *ServiceTestFixture { +func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture { t := &ServiceTestFixture{} t.Client = client t.Namespace = namespace @@ -2571,7 +2564,7 @@ func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *api.ReplicationC // createRC creates a replication controller and records it for cleanup. 
func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) { - rc, err := t.Client.ReplicationControllers(t.Namespace).Create(rc) + rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc) if err == nil { t.rcs[rc.Name] = true } @@ -2580,7 +2573,7 @@ func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.Repli // Create a service, and record it for cleanup func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) { - result, err := t.Client.Services(t.Namespace).Create(service) + result, err := t.Client.Core().Services(t.Namespace).Create(service) if err == nil { t.services[service.Name] = true } @@ -2589,7 +2582,7 @@ func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, // Delete a service, and remove it from the cleanup list func (t *ServiceTestFixture) DeleteService(serviceName string) error { - err := t.Client.Services(t.Namespace).Delete(serviceName) + err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) if err == nil { delete(t.services, serviceName) } @@ -2601,25 +2594,25 @@ func (t *ServiceTestFixture) Cleanup() []error { for rcName := range t.rcs { By("stopping RC " + rcName + " in namespace " + t.Namespace) // First, resize the RC to 0. - old, err := t.Client.ReplicationControllers(t.Namespace).Get(rcName) + old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName) if err != nil { errs = append(errs, err) } old.Spec.Replicas = 0 - if _, err := t.Client.ReplicationControllers(t.Namespace).Update(old); err != nil { + if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil { errs = append(errs, err) } // TODO(mikedanese): Wait. // Then, delete the RC altogether. 
- if err := t.Client.ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { + if err := t.Client.Core().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { errs = append(errs, err) } } for serviceName := range t.services { By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.Services(t.Namespace).Delete(serviceName) + err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) if err != nil { errs = append(errs, err) } @@ -2657,22 +2650,22 @@ func (j *ServiceTestJig) launchEchoserverPodOnNode(f *framework.Framework, nodeN pod := newEchoServerPodSpec(podName) pod.Spec.NodeName = nodeName pod.ObjectMeta.Labels = j.Labels - podClient := f.Client.Pods(f.Namespace.Name) + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := podClient.Create(pod) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodRunning(podName)) framework.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) } -func execSourceipTest(f *framework.Framework, c *client.Client, ns, nodeName, serviceIP string, servicePort int) (string, string) { +func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) { framework.Logf("Creating an exec pod on node %v", nodeName) - execPodName := createExecPodOnNode(f.Client, ns, nodeName, fmt.Sprintf("execpod-sourceip-%s", nodeName)) + execPodName := createExecPodOnNode(f.ClientSet, ns, nodeName, fmt.Sprintf("execpod-sourceip-%s", nodeName)) defer func() { framework.Logf("Cleaning up the exec pod") - err := c.Pods(ns).Delete(execPodName, nil) + err := c.Core().Pods(ns).Delete(execPodName, nil) Expect(err).NotTo(HaveOccurred()) }() - execPod, err := f.Client.Pods(ns).Get(execPodName) + execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName) ExpectNoError(err) var stdout string diff --git a/test/e2e/service_accounts.go b/test/e2e/service_accounts.go index 5db596850bb..ffa1ce81bed 100644 --- a/test/e2e/service_accounts.go +++ b/test/e2e/service_accounts.go @@ -42,7 +42,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { var secrets []api.ObjectReference framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { By("waiting for a single token reference") - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") if apierrors.IsNotFound(err) { framework.Logf("default service account was not found") return false, nil @@ -68,19 +68,19 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } // delete the referenced secret By("deleting the service account token") - framework.ExpectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name)) + framework.ExpectNoError(f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil)) // wait for the referenced secret to be removed, and another one autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token reference") - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, 
err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } @@ -114,17 +114,17 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { // delete the reference from the service account By("deleting the reference to the service account token") { - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") framework.ExpectNoError(err) sa.Secrets = nil - _, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa) + _, updateErr := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Update(sa) framework.ExpectNoError(updateErr) } // wait for another one to be autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token to be created and added") - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } @@ -159,7 +159,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("getting the auto-created API token") - sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default") + sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") if apierrors.IsNotFound(err) { framework.Logf("default service account was not found") return false, nil @@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { return false, nil } for _, secretRef := range sa.Secrets { - secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name) + secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name) if err != nil { framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue @@ -214,7 +214,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() { }, } - supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.Client) + supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery()) if supportsTokenNamespace { pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ Name: "namespace-test", diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go index cb312406597..b8856f9dd97 100644 --- a/test/e2e/service_latency.go +++ b/test/e2e/service_latency.go @@ -24,6 +24,7 @@ import ( 
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/sets" @@ -66,9 +67,9 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() { ) // Turn off rate limiting--it interferes with our measurements. - oldThrottle := f.Client.RESTClient.Throttle - f.Client.RESTClient.Throttle = flowcontrol.NewFakeAlwaysRateLimiter() - defer func() { f.Client.RESTClient.Throttle = oldThrottle }() + oldThrottle := f.ClientSet.Core().RESTClient().GetRateLimiter() + f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter() + defer func() { f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }() failing := sets.NewString() d, err := runServiceLatencies(f, parallelTrials, totalTrials) @@ -117,8 +118,8 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() { func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) { cfg := testutils.RCConfig{ - Client: f.Client, - Image: framework.GetPauseImageName(f.Client), + Client: f.ClientSet, + Image: framework.GetPauseImageName(f.ClientSet), Name: "svc-latency-rc", Namespace: f.Namespace.Name, Replicas: 1, @@ -277,10 +278,11 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return f.Client.Endpoints(f.Namespace.Name).List(options) + obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return f.Client.Endpoints(f.Namespace.Name).Watch(options) + return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) }, }, &api.Endpoints{}, @@ -325,7 +327,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie }, } startTime := time.Now() - gotSvc, err := f.Client.Services(f.Namespace.Name).Create(svc) + gotSvc, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc) if err != nil { return 0, err } diff --git a/test/e2e/serviceloadbalancers.go b/test/e2e/serviceloadbalancers.go index bff54f9b95d..2936ffc5918 100644 --- a/test/e2e/serviceloadbalancers.go +++ b/test/e2e/serviceloadbalancers.go @@ -22,7 +22,7 @@ import ( "net/http" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/wait" @@ -34,7 +34,7 @@ import ( ) // getLoadBalancerControllers returns a list of LBCtesters. -func getLoadBalancerControllers(client *client.Client) []LBCTester { +func getLoadBalancerControllers(client clientset.Interface) []LBCTester { return []LBCTester{ &haproxyControllerTester{ name: "haproxy", @@ -45,7 +45,7 @@ func getLoadBalancerControllers(client *client.Client) []LBCTester { } // getIngManagers returns a list of ingManagers. -func getIngManagers(client *client.Client) []*ingManager { +func getIngManagers(client clientset.Interface) []*ingManager { return []*ingManager{ { name: "netexec", @@ -71,7 +71,7 @@ type LBCTester interface { // haproxyControllerTester implements LBCTester for bare metal haproxy LBs. 
type haproxyControllerTester struct { - client *client.Client + client clientset.Interface cfg string rcName string rcNamespace string @@ -98,7 +98,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args) } - rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc) + rc, err = h.client.Core().ReplicationControllers(rc.Namespace).Create(rc) if err != nil { return } @@ -112,7 +112,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { labelSelector := labels.SelectorFromSet( labels.Set(map[string]string{"name": h.rcName})) options := api.ListOptions{LabelSelector: labelSelector} - pods, err := h.client.Pods(h.rcNamespace).List(options) + pods, err := h.client.Core().Pods(h.rcNamespace).List(options) if err != nil { return err } @@ -136,7 +136,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { } func (h *haproxyControllerTester) stop() error { - return h.client.ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil) + return h.client.Core().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil) } func (h *haproxyControllerTester) lookup(ingressKey string) string { @@ -151,7 +151,7 @@ type ingManager struct { ingCfgPath string name string namespace string - client *client.Client + client clientset.Interface svcNames []string } @@ -165,7 +165,7 @@ func (s *ingManager) start(namespace string) (err error) { rc := rcFromManifest(rcPath) rc.Namespace = namespace rc.Spec.Template.Labels["name"] = rc.Name - rc, err = s.client.ReplicationControllers(rc.Namespace).Create(rc) + rc, err = s.client.Core().ReplicationControllers(rc.Namespace).Create(rc) if err != nil { return } @@ -179,7 +179,7 @@ func (s *ingManager) start(namespace string) (err error) { for _, svcPath := range s.svcCfgPaths { svc := svcFromManifest(svcPath) svc.Namespace = namespace - svc, err = s.client.Services(svc.Namespace).Create(svc) + svc, err = s.client.Core().Services(svc.Namespace).Create(svc) if err != nil { return } @@ -207,12 +207,12 @@ func (s *ingManager) test(path string) error { var _ = framework.KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() { // These variables are initialized after framework's beforeEach. 
var ns string - var client *client.Client + var client clientset.Interface f := framework.NewDefaultFramework("servicelb") BeforeEach(func() { - client = f.Client + client = f.ClientSet ns = f.Namespace.Name }) diff --git a/test/e2e/third-party.go b/test/e2e/third-party.go index a999fa697a3..bfd40d34bc2 100644 --- a/test/e2e/third-party.go +++ b/test/e2e/third-party.go @@ -75,16 +75,16 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() { Context("Simple Third Party", func() { It("creating/deleting thirdparty objects works [Conformance]", func() { defer func() { - if err := f.Client.ThirdPartyResources().Delete(rsrc.Name); err != nil { + if err := f.ClientSet.Extensions().ThirdPartyResources().Delete(rsrc.Name, nil); err != nil { framework.Failf("failed to delete third party resource: %v", err) } }() - if _, err := f.Client.ThirdPartyResources().Create(rsrc); err != nil { + if _, err := f.ClientSet.Extensions().ThirdPartyResources().Create(rsrc); err != nil { framework.Failf("failed to create third party resource: %v", err) } wait.Poll(time.Second*30, time.Minute*5, func() (bool, error) { - data, err := f.Client.RESTClient.Get().AbsPath("/apis/company.com/v1/foos").DoRaw() + data, err := f.ClientSet.Extensions().RESTClient().Get().AbsPath("/apis/company.com/v1/foos").DoRaw() if err != nil { return false, err } @@ -105,7 +105,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() { return false, nil }) - data, err := f.Client.RESTClient.Get().AbsPath("/apis/company.com/v1/foos").DoRaw() + data, err := f.ClientSet.Extensions().RESTClient().Get().AbsPath("/apis/company.com/v1/foos").DoRaw() if err != nil { framework.Failf("failed to list with no objects: %v", err) } @@ -130,11 +130,11 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() { if err != nil { framework.Failf("failed to marshal: %v", err) } - if _, err := f.Client.RESTClient.Post().AbsPath("/apis/company.com/v1/namespaces/default/foos").Body(bodyData).DoRaw(); err != nil { + if _, err := f.ClientSet.Extensions().RESTClient().Post().AbsPath("/apis/company.com/v1/namespaces/default/foos").Body(bodyData).DoRaw(); err != nil { framework.Failf("failed to create: %v", err) } - data, err = f.Client.RESTClient.Get().AbsPath("/apis/company.com/v1/namespaces/default/foos/foo").DoRaw() + data, err = f.ClientSet.Extensions().RESTClient().Get().AbsPath("/apis/company.com/v1/namespaces/default/foos/foo").DoRaw() if err != nil { framework.Failf("failed to get object: %v", err) } @@ -146,7 +146,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() { framework.Failf("expected:\n%#v\nsaw:\n%#v\n%s\n", foo, &out, string(data)) } - data, err = f.Client.RESTClient.Get().AbsPath("/apis/company.com/v1/foos").DoRaw() + data, err = f.ClientSet.Extensions().RESTClient().Get().AbsPath("/apis/company.com/v1/foos").DoRaw() if err != nil { framework.Failf("failed to list with no objects: %v", err) } @@ -160,11 +160,11 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() { framework.Failf("expected: %#v, saw in list: %#v", foo, list.Items[0]) } - if _, err := f.Client.RESTClient.Delete().AbsPath("/apis/company.com/v1/namespaces/default/foos/foo").DoRaw(); err != nil { + if _, err := f.ClientSet.Extensions().RESTClient().Delete().AbsPath("/apis/company.com/v1/namespaces/default/foos/foo").DoRaw(); err != nil { framework.Failf("failed to delete: %v", err) } - data, err = f.Client.RESTClient.Get().AbsPath("/apis/company.com/v1/foos").DoRaw() + data, err = 
f.ClientSet.Extensions().RESTClient().Get().AbsPath("/apis/company.com/v1/foos").DoRaw() if err != nil { framework.Failf("failed to list with no objects: %v", err) } diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go index f027a85d667..9e8a9061d25 100644 --- a/test/e2e/ubernetes_lite.go +++ b/test/e2e/ubernetes_lite.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/sets" @@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Multi-AZ Clusters", func() { BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke", "aws") if zoneCount <= 0 { - zoneCount, err = getZoneCount(f.Client) + zoneCount, err = getZoneCount(f.ClientSet) Expect(err).NotTo(HaveOccurred()) } By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) @@ -76,7 +76,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }}, }, } - _, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec) + _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(serviceSpec) Expect(err).NotTo(HaveOccurred()) // Now create some pods behind the service @@ -89,7 +89,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) Containers: []api.Container{ { Name: "test", - Image: framework.GetPauseImageName(f.Client), + Image: framework.GetPauseImageName(f.ClientSet), }, }, }, @@ -99,17 +99,17 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) // Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0. // Thus, no need to test for it. Once the precondition changes to zero number of replicas, // test for replicaCount > 0. Otherwise, StartPods panics. - framework.ExpectNoError(testutils.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf)) + framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf)) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) - pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) + pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) Expect(err).NotTo(HaveOccurred()) // Now make sure they're spread across zones - zoneNames, err := getZoneNames(f.Client) + zoneNames, err := getZoneNames(f.ClientSet) Expect(err).NotTo(HaveOccurred()) - Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true)) + Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true)) } // Find the name of the zone in which a Node is running @@ -124,9 +124,9 @@ func getZoneNameForNode(node api.Node) (string, error) { } // Find the names of all zones in which we have nodes in this cluster. 
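The ThirdParty hunks above show the other access path the generated clientset offers: when no typed wrapper exists for a resource (here the company.com/v1 "foos" registered by the test), raw requests go through a group client's RESTClient() rather than the old f.Client.RESTClient field. A minimal sketch of that pattern; the helper name is invented and the AbsPath simply mirrors the path used in the test.

package e2esketch

import (
	"encoding/json"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// listFoosRaw is illustrative only.
func listFoosRaw(cs clientset.Interface) (map[string]interface{}, error) {
	data, err := cs.Extensions().RESTClient().Get().
		AbsPath("/apis/company.com/v1/foos").
		DoRaw()
	if err != nil {
		return nil, err
	}
	// Decode generically; the e2e test unmarshals into its own Foo list type.
	out := map[string]interface{}{}
	err = json.Unmarshal(data, &out)
	return out, err
}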
-func getZoneNames(c *client.Client) ([]string, error) { +func getZoneNames(c clientset.Interface) ([]string, error) { zoneNames := sets.NewString() - nodes, err := c.Nodes().List(api.ListOptions{}) + nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { return nil, err } @@ -139,7 +139,7 @@ func getZoneNames(c *client.Client) ([]string, error) { } // Return the number of zones in which we have nodes in this cluster. -func getZoneCount(c *client.Client) (int, error) { +func getZoneCount(c clientset.Interface) (int, error) { zoneNames, err := getZoneNames(c) if err != nil { return -1, err @@ -148,16 +148,16 @@ func getZoneCount(c *client.Client) (int, error) { } // Find the name of the zone in which the pod is scheduled -func getZoneNameForPod(c *client.Client, pod api.Pod) (string, error) { +func getZoneNameForPod(c clientset.Interface, pod api.Pod) (string, error) { By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) - node, err := c.Nodes().Get(pod.Spec.NodeName) + node, err := c.Core().Nodes().Get(pod.Spec.NodeName) Expect(err).NotTo(HaveOccurred()) return getZoneNameForNode(*node) } // Determine whether a set of pods are approximately evenly spread // across a given set of zones -func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string) (bool, error) { +func checkZoneSpreading(c clientset.Interface, pods *api.PodList, zoneNames []string) (bool, error) { podsPerZone := make(map[string]int) for _, zoneName := range zoneNames { podsPerZone[zoneName] = 0 @@ -190,7 +190,7 @@ func checkZoneSpreading(c *client.Client, pods *api.PodList, zoneNames []string) func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) By(fmt.Sprintf("Creating replication controller %s", name)) - controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ + controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Namespace: f.Namespace.Name, Name: name, @@ -220,22 +220,22 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil { + if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount) + pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount) Expect(err).NotTo(HaveOccurred()) // Wait for all of them to be scheduled By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. 
Selector: %v", replicaCount, name, selector)) - pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector) + pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) Expect(err).NotTo(HaveOccurred()) // Now make sure they're spread across zones - zoneNames, err := getZoneNames(f.Client) + zoneNames, err := getZoneNames(f.ClientSet) Expect(err).NotTo(HaveOccurred()) - Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true)) + Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true)) } diff --git a/test/e2e/volume_provisioning.go b/test/e2e/volume_provisioning.go index 0d98fc7537a..18a6981c92b 100644 --- a/test/e2e/volume_provisioning.go +++ b/test/e2e/volume_provisioning.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -39,17 +39,17 @@ const ( expectedSize = "2Gi" ) -func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) { +func testDynamicProvisioning(client clientset.Interface, claim *api.PersistentVolumeClaim) { err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) By("checking the claim") // Get new copy of the claim - claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) + claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name) Expect(err).NotTo(HaveOccurred()) // Get the bound PV - pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName) + pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName) Expect(err).NotTo(HaveOccurred()) // Check sizes @@ -96,7 +96,7 @@ func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeC time.Sleep(3 * time.Minute) By("deleting the claim") - framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name)) + framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) // Wait for the PV to get deleted too. 
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute)) @@ -106,11 +106,11 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() { f := framework.NewDefaultFramework("volume-provisioning") // filled in BeforeEach - var c *client.Client + var c clientset.Interface var ns string BeforeEach(func() { - c = f.Client + c = f.ClientSet ns = f.Namespace.Name }) @@ -121,15 +121,15 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() { By("creating a StorageClass") class := newStorageClass() _, err := c.Storage().StorageClasses().Create(class) - defer c.Storage().StorageClasses().Delete(class.Name) + defer c.Storage().StorageClasses().Delete(class.Name, nil) Expect(err).NotTo(HaveOccurred()) By("creating a claim with a dynamic provisioning annotation") claim := newClaim(ns, false) defer func() { - c.PersistentVolumeClaims(ns).Delete(claim.Name) + c.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil) }() - claim, err = c.PersistentVolumeClaims(ns).Create(claim) + claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim) Expect(err).NotTo(HaveOccurred()) testDynamicProvisioning(c, claim) @@ -143,9 +143,9 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() { By("creating a claim with an alpha dynamic provisioning annotation") claim := newClaim(ns, true) defer func() { - c.PersistentVolumeClaims(ns).Delete(claim.Name) + c.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil) }() - claim, err := c.PersistentVolumeClaims(ns).Create(claim) + claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim) Expect(err).NotTo(HaveOccurred()) testDynamicProvisioning(c, claim) @@ -186,7 +186,7 @@ func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim { } // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. -func runInPodWithVolume(c *client.Client, ns, claimName, command string) { +func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { pod := &api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", @@ -224,9 +224,9 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) { }, }, } - pod, err := c.Pods(ns).Create(pod) + pod, err := c.Core().Pods(ns).Create(pod) defer func() { - framework.ExpectNoError(c.Pods(ns).Delete(pod.Name, nil)) + framework.ExpectNoError(c.Core().Pods(ns).Delete(pod.Name, nil)) }() framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go index ecaf5fcc258..b67608d68f0 100644 --- a/test/e2e/volumes.go +++ b/test/e2e/volumes.go @@ -49,7 +49,7 @@ import ( "k8s.io/kubernetes/pkg/api" apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" "github.com/golang/glog" @@ -78,8 +78,8 @@ type VolumeTestConfig struct { // Starts a container specified by config.serverImage and exports all // config.serverPorts from it. The returned pod should be used to get the server // IP address and create appropriate VolumeSource. 
-func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod { - podClient := client.Pods(config.namespace) +func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *api.Pod { + podClient := client.Core().Pods(config.namespace) portCount := len(config.serverPorts) serverPodPorts := make([]api.ContainerPort, portCount) @@ -164,8 +164,8 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) { defer GinkgoRecover() - client := f.Client - podClient := client.Pods(config.namespace) + client := f.ClientSet + podClient := client.Core().Pods(config.namespace) err := podClient.Delete(config.prefix+"-client", nil) if err != nil { @@ -194,7 +194,7 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) { // Start a client pod using given VolumeSource (exported by startVolumeServer()) // and check that the pod sees the data from the server pod. -func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api.VolumeSource, fsGroup *int64, expectedContent string) { +func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, fsGroup *int64, expectedContent string) { By(fmt.Sprint("starting ", config.prefix, " client")) clientPod := &api.Pod{ TypeMeta: unversioned.TypeMeta{ @@ -242,7 +242,7 @@ func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api }, }, } - podsNamespacer := client.Pods(config.namespace) + podsNamespacer := client.Core().Pods(config.namespace) if fsGroup != nil { clientPod.Spec.SecurityContext.FSGroup = fsGroup @@ -268,9 +268,9 @@ func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api // Insert index.html with given content into given volume. It does so by // starting and auxiliary pod which writes the file there. // The volume must be writable. 
-func injectHtml(client *client.Client, config VolumeTestConfig, volume api.VolumeSource, content string) { +func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, content string) { By(fmt.Sprint("starting ", config.prefix, " injector")) - podClient := client.Pods(config.namespace) + podClient := client.Core().Pods(config.namespace) injectPod := &api.Pod{ TypeMeta: unversioned.TypeMeta{ @@ -353,11 +353,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { // note that namespace deletion is handled by delete-namespace flag clean := true // filled in BeforeEach - var c *client.Client + var cs clientset.Interface var namespace *api.Namespace BeforeEach(func() { - c = f.Client + cs = f.ClientSet namespace = f.Namespace }) @@ -379,7 +379,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { volumeTestCleanup(f, config) } }() - pod := startVolumeServer(c, config) + pod := startVolumeServer(cs, config) serverIP := pod.Status.PodIP framework.Logf("NFS server IP address: %v", serverIP) @@ -391,7 +391,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { }, } // Must match content of test/images/volumes-tester/nfs/index.html - testVolumeClient(c, config, volume, nil, "Hello from NFS!") + testVolumeClient(cs, config, volume, nil, "Hello from NFS!") }) }) @@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { volumeTestCleanup(f, config) } }() - pod := startVolumeServer(c, config) + pod := startVolumeServer(cs, config) serverIP := pod.Status.PodIP framework.Logf("Gluster server IP address: %v", serverIP) @@ -444,11 +444,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { }, } - endClient := c.Endpoints(config.namespace) + endClient := cs.Core().Endpoints(config.namespace) defer func() { if clean { - endClient.Delete(config.prefix + "-server") + endClient.Delete(config.prefix+"-server", nil) } }() @@ -465,7 +465,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { }, } // Must match content of test/images/volumes-tester/gluster/index.html - testVolumeClient(c, config, volume, nil, "Hello from GlusterFS!") + testVolumeClient(cs, config, volume, nil, "Hello from GlusterFS!") }) }) @@ -496,7 +496,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { volumeTestCleanup(f, config) } }() - pod := startVolumeServer(c, config) + pod := startVolumeServer(cs, config) serverIP := pod.Status.PodIP framework.Logf("iSCSI server IP address: %v", serverIP) @@ -512,7 +512,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { fsGroup := int64(1234) // Must match content of test/images/volumes-tester/iscsi/block.tar.gz - testVolumeClient(c, config, volume, &fsGroup, "Hello from iSCSI") + testVolumeClient(cs, config, volume, &fsGroup, "Hello from iSCSI") }) }) @@ -539,7 +539,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { volumeTestCleanup(f, config) } }() - pod := startVolumeServer(c, config) + pod := startVolumeServer(cs, config) serverIP := pod.Status.PodIP framework.Logf("Ceph server IP address: %v", serverIP) @@ -558,11 +558,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { }, } - secClient := c.Secrets(config.namespace) + secClient := cs.Core().Secrets(config.namespace) defer func() { if clean { - secClient.Delete(config.prefix + "-secret") + secClient.Delete(config.prefix+"-secret", nil) } }() @@ -585,7 +585,7 @@ var _ = framework.KubeDescribe("Volumes 
[Feature:Volumes]", func() { fsGroup := int64(1234) // Must match content of test/images/volumes-tester/gluster/index.html - testVolumeClient(c, config, volume, &fsGroup, "Hello from RBD") + testVolumeClient(cs, config, volume, &fsGroup, "Hello from RBD") }) }) @@ -607,7 +607,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { volumeTestCleanup(f, config) } }() - pod := startVolumeServer(c, config) + pod := startVolumeServer(cs, config) serverIP := pod.Status.PodIP framework.Logf("Ceph server IP address: %v", serverIP) By("sleeping a bit to give ceph server time to initialize") @@ -631,14 +631,14 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { defer func() { if clean { - if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil { + if err := cs.Core().Secrets(namespace.Name).Delete(secret.Name, nil); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } } }() var err error - if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil { + if secret, err = cs.Core().Secrets(namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -651,7 +651,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { }, } // Must match content of contrib/for-tests/volumes-ceph/ceph/index.html - testVolumeClient(c, config, volume, nil, "Hello Ceph!") + testVolumeClient(cs, config, volume, nil, "Hello Ceph!") }) }) @@ -723,10 +723,10 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { // Insert index.html into the test volume with some random content // to make sure we don't see the content from previous test runs. content := "Hello from Cinder from namespace " + volumeName - injectHtml(c, config, volume, content) + injectHtml(cs, config, volume, content) fsGroup := int64(1234) - testVolumeClient(c, config, volume, &fsGroup, content) + testVolumeClient(cs, config, volume, &fsGroup, content) }) }) @@ -767,10 +767,10 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() { // Insert index.html into the test volume with some random content // to make sure we don't see the content from previous test runs. content := "Hello from GCE PD from namespace " + volumeName - injectHtml(c, config, volume, content) + injectHtml(cs, config, volume, content) fsGroup := int64(1234) - testVolumeClient(c, config, volume, &fsGroup, content) + testVolumeClient(cs, config, volume, &fsGroup, content) }) }) }) diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 4d7ad3ad6d9..c94bf88c1b8 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -148,7 +148,7 @@ func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus { pod := createPodWithAppArmor(f, profile) // The pod needs to start before it stops, so wait for the longer start timeout. 
framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace( - f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout)) + f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout)) p, err := f.PodClient().Get(pod.Name) framework.ExpectNoError(err) return p.Status diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go index 4083b2926ec..6debb765859 100644 --- a/test/e2e_node/benchmark_util.go +++ b/test/e2e_node/benchmark_util.go @@ -126,7 +126,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc // getTestNodeInfo fetches the capacity of a node from API server and returns a map of labels. func getTestNodeInfo(f *framework.Framework, testName string) map[string]string { nodeName := framework.TestContext.NodeName - node, err := f.Client.Nodes().Get(nodeName) + node, err := f.ClientSet.Core().Nodes().Get(nodeName) Expect(err).NotTo(HaveOccurred()) cpu, ok := node.Status.Capacity["cpu"] diff --git a/test/e2e_node/cgroup_manager_test.go b/test/e2e_node/cgroup_manager_test.go index 428dd7d5ac1..085cc0ba411 100644 --- a/test/e2e_node/cgroup_manager_test.go +++ b/test/e2e_node/cgroup_manager_test.go @@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager [Skip]", func() { } podClient := f.PodClient() podClient.Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.Client, podName, f.Namespace.Name) + err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index f68053afd51..234e03d516b 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -495,11 +495,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}) - return f.Client.Pods(ns).List(options) + obj, err := f.ClientSet.Core().Pods(ns).List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}) - return f.Client.Pods(ns).Watch(options) + return f.ClientSet.Core().Pods(ns).Watch(options) }, }, &api.Pod{}, diff --git a/test/e2e_node/disk_eviction_test.go b/test/e2e_node/disk_eviction_test.go index 5595e1fbfbc..b6b14984be0 100644 --- a/test/e2e_node/disk_eviction_test.go +++ b/test/e2e_node/disk_eviction_test.go @@ -28,7 +28,6 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" ) const ( @@ -50,11 +49,11 @@ const ( var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("kubelet-eviction-manager") var podClient *framework.PodClient - var c *client.Client + var c clientset.Interface BeforeEach(func() { podClient = f.PodClient() - c = f.Client + c = f.ClientSet }) Describe("hard eviction test", func() { diff --git a/test/e2e_node/dynamic_kubelet_configuration_test.go b/test/e2e_node/dynamic_kubelet_configuration_test.go index 674022b1fd5..4e70e8a65e2 100644 --- a/test/e2e_node/dynamic_kubelet_configuration_test.go +++ b/test/e2e_node/dynamic_kubelet_configuration_test.go @@ -160,7 +160,7 @@ func createConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletCon bytes, err := json.Marshal(kubeCfgExt) framework.ExpectNoError(err) - cmap, err := f.Client.ConfigMaps("kube-system").Create(&api.ConfigMap{ + cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(&api.ConfigMap{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName), }, @@ -182,7 +182,7 @@ func updateConfigMap(f *framework.Framework, kubeCfg *componentconfig.KubeletCon bytes, err := json.Marshal(kubeCfgExt) framework.ExpectNoError(err) - cmap, err := f.Client.ConfigMaps("kube-system").Update(&api.ConfigMap{ + cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Update(&api.ConfigMap{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("kubelet-%s", framework.TestContext.NodeName), }, diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index d81ac51a688..da831b60794 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("ImageID", func() { pod := f.PodClient().Create(podDesc) framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace( - f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout)) + f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout)) runningPod, err := f.PodClient().Get(pod.Name) framework.ExpectNoError(err) diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 37639ec6411..9aaac32ef97 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -42,10 +42,10 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu Context("", func() { AfterEach(func() { glog.Infof("Summary of node events during the memory eviction test:") - err := framework.ListNamespaceEvents(f.Client, f.Namespace.Name) + err := framework.ListNamespaceEvents(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) glog.Infof("Summary of pod events during the memory eviction test:") - err = framework.ListNamespaceEvents(f.Client, "") + err = framework.ListNamespaceEvents(f.ClientSet, "") framework.ExpectNoError(err) }) @@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // Wait for the memory pressure condition to disappear from the node status before continuing. 
By("waiting for the memory pressure condition on the node to disappear before ending the test.") Eventually(func() error { - nodeList, err := f.Client.Nodes().List(api.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) if err != nil { return fmt.Errorf("tried to get node list but got error: %v", err) } @@ -154,15 +154,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu By("polling the Status.Phase of each pod and checking for violations of the eviction order.") Eventually(func() error { - gteed, gtErr := f.Client.Pods(f.Namespace.Name).Get(guaranteed.Name) + gteed, gtErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(guaranteed.Name) framework.ExpectNoError(gtErr, fmt.Sprintf("getting pod %s", guaranteed.Name)) gteedPh := gteed.Status.Phase - burst, buErr := f.Client.Pods(f.Namespace.Name).Get(burstable.Name) + burst, buErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(burstable.Name) framework.ExpectNoError(buErr, fmt.Sprintf("getting pod %s", burstable.Name)) burstPh := burst.Status.Phase - best, beErr := f.Client.Pods(f.Namespace.Name).Get(besteffort.Name) + best, beErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(besteffort.Name) framework.ExpectNoError(beErr, fmt.Sprintf("getting pod %s", besteffort.Name)) bestPh := best.Status.Phase @@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // see the eviction manager reporting a pressure condition for a while without the besteffort failing, // and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we // will have more reason to believe the phase is out of date. - nodeList, err := f.Client.Nodes().List(api.ListOptions{}) + nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) if err != nil { glog.Errorf("tried to get node list but got error: %v", err) } diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index b7315042c85..9d42858a5a4 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" @@ -50,12 +50,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() { By("wait for the mirror pod to be running") Eventually(func() error { - return checkMirrorPodRunning(f.Client, mirrorPodName, ns) + return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) It("should be updated when static pod updated", func() { By("get mirror pod uid") - pod, err := f.Client.Pods(ns).Get(mirrorPodName) + pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID @@ -66,43 +66,43 @@ var _ = framework.KubeDescribe("MirrorPod", func() { By("wait for the mirror pod to be updated") Eventually(func() error { - return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid) + return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(BeNil()) By("check the mirror pod container image is updated") - pod, err = f.Client.Pods(ns).Get(mirrorPodName) + pod, err = f.ClientSet.Core().Pods(ns).Get(mirrorPodName) Expect(err).ShouldNot(HaveOccurred()) 
Expect(len(pod.Spec.Containers)).Should(Equal(1)) Expect(pod.Spec.Containers[0].Image).Should(Equal(image)) }) It("should be recreated when mirror pod gracefully deleted", func() { By("get mirror pod uid") - pod, err := f.Client.Pods(ns).Get(mirrorPodName) + pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID By("delete the mirror pod with grace period 30s") - err = f.Client.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30)) + err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30)) Expect(err).ShouldNot(HaveOccurred()) By("wait for the mirror pod to be recreated") Eventually(func() error { - return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid) + return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) It("should be recreated when mirror pod forcibly deleted", func() { By("get mirror pod uid") - pod, err := f.Client.Pods(ns).Get(mirrorPodName) + pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID By("delete the mirror pod with grace period 0s") - err = f.Client.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0)) + err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0)) Expect(err).ShouldNot(HaveOccurred()) By("wait for the mirror pod to be recreated") Eventually(func() error { - return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid) + return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) AfterEach(func() { @@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { By("wait for the mirror pod to disappear") Eventually(func() error { - return checkMirrorPodDisappear(f.Client, mirrorPodName, ns) + return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) }) @@ -153,16 +153,16 @@ func deleteStaticPod(dir, name, namespace string) error { return os.Remove(file) } -func checkMirrorPodDisappear(cl *client.Client, name, namespace string) error { - _, err := cl.Pods(namespace).Get(name) +func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { + _, err := cl.Core().Pods(namespace).Get(name) if errors.IsNotFound(err) { return nil } return goerrors.New("pod not disappear") } -func checkMirrorPodRunning(cl *client.Client, name, namespace string) error { - pod, err := cl.Pods(namespace).Get(name) +func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error { + pod, err := cl.Core().Pods(namespace).Get(name) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -172,8 +172,8 @@ func checkMirrorPodRunning(cl *client.Client, name, namespace string) error { return nil } -func checkMirrorPodRecreatedAndRunnig(cl *client.Client, name, namespace string, oUID types.UID) error { - pod, err := cl.Pods(namespace).Get(name) +func checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace string, oUID types.UID) error { + pod, err := cl.Core().Pods(namespace).Get(name) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index cee4043d258..40d15a19a37 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -377,7 +377,7 @@ 
func deletePodsSync(f *framework.Framework, pods []*api.Pod) { err := f.PodClient().Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30)) Expect(err).NotTo(HaveOccurred()) - Expect(framework.WaitForPodToDisappear(f.Client, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), + Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute)).NotTo(HaveOccurred()) }(pod) } diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index 1fdb5512f6e..07b15f6a358 100644 --- a/test/e2e_node/resource_usage_test.go +++ b/test/e2e_node/resource_usage_test.go @@ -23,7 +23,7 @@ import ( "strings" "time" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/test/e2e/framework" @@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() { f := framework.NewDefaultFramework("resource-usage") BeforeEach(func() { - om = framework.NewRuntimeOperationMonitor(f.Client) + om = framework.NewRuntimeOperationMonitor(f.ClientSet) // The test collects resource usage from a standalone Cadvisor pod. // The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to // show the resource usage spikes. But changing its interval increases the overhead @@ -174,11 +174,11 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg } else { time.Sleep(reportingPeriod) } - logPods(f.Client) + logPods(f.ClientSet) } By("Reporting overall resource usage") - logPods(f.Client) + logPods(f.ClientSet) } // logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit. 
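The node-e2e hunks above all apply the same mechanical migration: test helpers stop taking the deprecated *client.Client and accept the generated clientset.Interface, reaching typed resources through Core(). A minimal sketch of that pattern follows; the helper and package names are illustrative and not part of this patch.

package e2enodesketch // illustrative package name, not from this patch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// listRunningPods shows the new helper shape: clientset.Interface in the
// signature, with typed core resources reached via Core().
func listRunningPods(c clientset.Interface, ns string) (*api.PodList, error) {
	// Before this migration the equivalent call was c.Pods(ns).List(...) on *client.Client.
	return c.Core().Pods(ns).List(api.ListOptions{})
}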
@@ -207,12 +207,12 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi // Verify resource usage if isVerify { - verifyMemoryLimits(f.Client, memLimits, usagePerNode) + verifyMemoryLimits(f.ClientSet, memLimits, usagePerNode) verifyCPULimits(cpuLimits, cpuSummaryPerNode) } } -func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { +func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { if expected == nil { return } @@ -282,7 +282,7 @@ func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.N } } -func logPods(c *client.Client) { +func logPods(c clientset.Interface) { nodeName := framework.TestContext.NodeName podList, err := framework.GetKubeletRunningPods(c, nodeName) if err != nil { diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 3f98bf96713..08d6dc888ba 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -251,9 +251,9 @@ while true; do sleep 1; done if testCase.secret { secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) By("create image pull secret") - _, err := f.Client.Secrets(f.Namespace.Name).Create(secret) + _, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret) Expect(err).NotTo(HaveOccurred()) - defer f.Client.Secrets(f.Namespace.Name).Delete(secret.Name) + defer f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil) container.ImagePullSecrets = []string{secret.Name} } // checkContainerStatus checks whether the container status matches expectation. diff --git a/test/images/clusterapi-tester/main.go b/test/images/clusterapi-tester/main.go index f366b9a8d35..ca3804da089 100644 --- a/test/images/clusterapi-tester/main.go +++ b/test/images/clusterapi-tester/main.go @@ -25,18 +25,24 @@ import ( "net/http" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" ) func main() { - kubeClient, err := client.NewInCluster() + cc, err := restclient.InClusterConfig() + if err != nil { + log.Fatalf("Failed to create client: %v", err) + } + + kubeClient, err := clientset.NewForConfig(cc) if err != nil { log.Fatalf("Failed to create client: %v", err) } listAll := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()} - nodes, err := kubeClient.Nodes().List(listAll) + nodes, err := kubeClient.Core().Nodes().List(listAll) if err != nil { log.Fatalf("Failed to list nodes: %v", err) } @@ -44,7 +50,7 @@ func main() { for _, node := range nodes.Items { log.Printf("\t%v", node.Name) } - services, err := kubeClient.Services(api.NamespaceDefault).List(listAll) + services, err := kubeClient.Core().Services(api.NamespaceDefault).List(listAll) if err != nil { log.Fatalf("Failed to list services: %v", err) } diff --git a/test/images/network-tester/webserver.go b/test/images/network-tester/webserver.go index c53f610c8ed..fa41b8a63ad 100644 --- a/test/images/network-tester/webserver.go +++ b/test/images/network-tester/webserver.go @@ -43,8 +43,8 @@ import ( "syscall" "time" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client 
"k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/sets" ) @@ -235,7 +235,7 @@ func contactOthers(state *State) { log.Fatalf("Unable to create config; error: %v\n", err) } config.ContentType = "application/vnd.kubernetes.protobuf" - client, err := client.New(config) + client, err := clientset.NewForConfig(config) if err != nil { log.Fatalf("Unable to create client; error: %v\n", err) } @@ -267,8 +267,8 @@ func contactOthers(state *State) { } //getWebserverEndpoints returns the webserver endpoints as a set of String, each in the format like "http://{ip}:{port}" -func getWebserverEndpoints(client *client.Client) sets.String { - endpoints, err := client.Endpoints(*namespace).Get(*service) +func getWebserverEndpoints(client clientset.Interface) sets.String { + endpoints, err := client.Core().Endpoints(*namespace).Get(*service) eps := sets.String{} if err != nil { state.Logf("Unable to read the endpoints for %v/%v: %v.", *namespace, *service, err) diff --git a/test/integration/client/client_test.go b/test/integration/client/client_test.go index def2e698049..b5513e409a6 100644 --- a/test/integration/client/client_test.go +++ b/test/integration/client/client_test.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/wait" @@ -47,7 +47,7 @@ func TestClient(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("client", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -60,7 +60,7 @@ func TestClient(t *testing.T) { t.Errorf("expected %#v, got %#v", e, a) } - pods, err := client.Pods(ns.Name).List(api.ListOptions{}) + pods, err := client.Core().Pods(ns.Name).List(api.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -83,14 +83,14 @@ func TestClient(t *testing.T) { }, } - got, err := client.Pods(ns.Name).Create(pod) + got, err := client.Core().Pods(ns.Name).Create(pod) if err == nil { t.Fatalf("unexpected non-error: %v", got) } // get a created pod pod.Spec.Containers[0].Image = "an-image" - got, err = client.Pods(ns.Name).Create(pod) + got, err = client.Core().Pods(ns.Name).Create(pod) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -99,7 +99,7 @@ func TestClient(t *testing.T) { } // pod is shown, but not scheduled - pods, err = client.Pods(ns.Name).List(api.ListOptions{}) + pods, err = client.Core().Pods(ns.Name).List(api.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -119,14 +119,14 @@ func TestAtomicPut(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: 
®istered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("atomic-put", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rcBody := api.ReplicationController{ TypeMeta: unversioned.TypeMeta{ - APIVersion: c.APIVersion().String(), + APIVersion: c.Core().RESTClient().APIVersion().String(), }, ObjectMeta: api.ObjectMeta{ Name: "atomicrc", @@ -154,7 +154,7 @@ func TestAtomicPut(t *testing.T) { }, }, } - rcs := c.ReplicationControllers(ns.Name) + rcs := c.Core().ReplicationControllers(ns.Name) rc, err := rcs.Create(&rcBody) if err != nil { t.Fatalf("Failed creating atomicRC: %v", err) @@ -211,7 +211,7 @@ func TestPatch(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("patch", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -220,7 +220,7 @@ func TestPatch(t *testing.T) { resource := "pods" podBody := api.Pod{ TypeMeta: unversioned.TypeMeta{ - APIVersion: c.APIVersion().String(), + APIVersion: c.Core().RESTClient().APIVersion().String(), }, ObjectMeta: api.ObjectMeta{ Name: name, @@ -233,7 +233,7 @@ func TestPatch(t *testing.T) { }, }, } - pods := c.Pods(ns.Name) + pods := c.Core().Pods(ns.Name) pod, err := pods.Create(&podBody) if err != nil { t.Fatalf("Failed creating patchpods: %v", err) @@ -263,10 +263,10 @@ func TestPatch(t *testing.T) { }, } - pb := patchBodies[c.APIVersion()] + pb := patchBodies[c.Core().RESTClient().APIVersion()] execPatch := func(pt api.PatchType, body []byte) error { - return c.Patch(pt). + return c.Core().RESTClient().Patch(pt). Resource(resource). Namespace(ns.Name). Name(name). 
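The client_test.go hunks also move raw request-builder calls off the old client and onto the clientset's REST client. A minimal sketch of that access path, under the assumption of an illustrative helper and payload (only the Core().RESTClient() chain and api.MergePatchType are taken from the patch):

package integrationsketch // illustrative package name, not from this patch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// patchPod shows where the request builders now live: verbs such as Patch and
// Get hang off Core().RESTClient() instead of off the client itself.
func patchPod(c clientset.Interface, ns, name string, body []byte) error {
	return c.Core().RESTClient().Patch(api.MergePatchType).
		Resource("pods").
		Namespace(ns).
		Name(name).
		Body(body).
		Do().
		Error()
}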
@@ -320,7 +320,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("patch-with-create", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -339,7 +339,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) { } patchEndpoint := func(json []byte) (runtime.Object, error) { - return c.Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get() + return c.Core().RESTClient().Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get() } // Make sure patch doesn't get to CreateOnUpdate @@ -354,7 +354,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) { } // Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version - createdEndpoint, err := c.Endpoints(ns.Name).Update(endpointTemplate) + createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate) if err != nil { t.Fatalf("Failed creating endpoint: %v", err) } @@ -431,10 +431,10 @@ func TestAPIVersions(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - clientVersion := c.APIVersion().String() - g, err := c.ServerGroups() + clientVersion := c.Core().RESTClient().APIVersion().String() + g, err := c.Discovery().ServerGroups() if err != nil { t.Fatalf("Failed to get api versions: %v", err) } @@ -456,7 +456,7 @@ func TestSingleWatch(t *testing.T) { ns := framework.CreateTestingNamespace("single-watch", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) mkEvent := func(i int) *api.Event { name := fmt.Sprintf("event-%v", i) @@ -476,7 +476,7 @@ func TestSingleWatch(t *testing.T) { rv1 := "" for i := 0; i < 10; i++ { event := mkEvent(i) - got, err := client.Events(ns.Name).Create(event) + got, err := client.Core().Events(ns.Name).Create(event) if err != nil { t.Fatalf("Failed creating event %#q: %v", event, err) } @@ -489,7 +489,7 @@ func TestSingleWatch(t *testing.T) { t.Logf("Created event %#v", got.ObjectMeta) } - w, err := client.Get(). + w, err := client.Core().RESTClient().Get(). Prefix("watch"). Namespace(ns.Name). Resource("events"). 
@@ -541,7 +541,7 @@ func TestMultiWatch(t *testing.T) { ns := framework.CreateTestingNamespace("multi-watch", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) dummyEvent := func(i int) *api.Event { name := fmt.Sprintf("unrelated-%v", i) @@ -570,7 +570,7 @@ func TestMultiWatch(t *testing.T) { for i := 0; i < watcherCount; i++ { watchesStarted.Add(1) name := fmt.Sprintf("multi-watch-%v", i) - got, err := client.Pods(ns.Name).Create(&api.Pod{ + got, err := client.Core().Pods(ns.Name).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, Labels: labels.Set{"watchlabel": name}, @@ -591,7 +591,7 @@ func TestMultiWatch(t *testing.T) { LabelSelector: labels.Set{"watchlabel": name}.AsSelector(), ResourceVersion: rv, } - w, err := client.Pods(ns.Name).Watch(options) + w, err := client.Core().Pods(ns.Name).Watch(options) if err != nil { panic(fmt.Sprintf("watch error for %v: %v", name, err)) } @@ -640,7 +640,7 @@ func TestMultiWatch(t *testing.T) { if !ok { return } - if _, err := client.Events(ns.Name).Create(dummyEvent(i)); err != nil { + if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil { panic(fmt.Sprintf("couldn't make an event: %v", err)) } changeMade <- i @@ -677,7 +677,7 @@ func TestMultiWatch(t *testing.T) { return } name := fmt.Sprintf("unrelated-%v", i) - _, err := client.Pods(ns.Name).Create(&api.Pod{ + _, err := client.Core().Pods(ns.Name).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -711,13 +711,13 @@ func TestMultiWatch(t *testing.T) { for i := 0; i < watcherCount; i++ { go func(i int) { name := fmt.Sprintf("multi-watch-%v", i) - pod, err := client.Pods(ns.Name).Get(name) + pod, err := client.Core().Pods(ns.Name).Get(name) if err != nil { panic(fmt.Sprintf("Couldn't get %v: %v", name, err)) } pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client) sentTimes <- timePair{time.Now(), name} - if _, err := client.Pods(ns.Name).Update(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil { panic(fmt.Sprintf("Couldn't make %v: %v", name, err)) } }(i) @@ -740,7 +740,7 @@ func TestMultiWatch(t *testing.T) { t.Errorf("durations: %v", dur) } -func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string) { +func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) { podBody := api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "selflinktest", @@ -755,20 +755,20 @@ func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string }, }, } - pod, err := c.Pods(namespace).Create(&podBody) + pod, err := c.Core().Pods(namespace).Create(&podBody) if err != nil { t.Fatalf("Failed creating selflinktest pod: %v", err) } - if err = c.Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil { + if err = c.Core().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil { t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err) } - podList, err := c.Pods(namespace).List(api.ListOptions{}) + podList, err := c.Core().Pods(namespace).List(api.ListOptions{}) if err != nil { t.Errorf("Failed listing pods: %v", err) } - if err = c.Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil { 
+ if err = c.Core().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil { t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err) } @@ -779,7 +779,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c *client.Client, namespace string continue } found = true - err = c.Get().RequestURI(item.SelfLink).Do().Into(pod) + err = c.Core().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod) if err != nil { t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err) } @@ -797,7 +797,7 @@ func TestSelfLinkOnNamespace(t *testing.T) { ns := framework.CreateTestingNamespace("selflink", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - c := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) runSelfLinkTestOnNamespace(t, c, ns.Name) } diff --git a/test/integration/client/dynamic_client_test.go b/test/integration/client/dynamic_client_test.go index 64718480a12..122ec19a477 100644 --- a/test/integration/client/dynamic_client_test.go +++ b/test/integration/client/dynamic_client_test.go @@ -27,9 +27,9 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/typed/dynamic" - uclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/test/integration/framework" ) @@ -47,7 +47,7 @@ func TestDynamicClient(t *testing.T) { ContentConfig: restclient.ContentConfig{GroupVersion: gv}, } - client := uclient.NewOrDie(config) + client := clientset.NewForConfigOrDie(config) dynamicClient, err := dynamic.NewClient(config) _ = dynamicClient if err != nil { @@ -87,7 +87,7 @@ func TestDynamicClient(t *testing.T) { }, } - actual, err := client.Pods(ns.Name).Create(pod) + actual, err := client.Core().Pods(ns.Name).Create(pod) if err != nil { t.Fatalf("unexpected error when creating pod: %v", err) } @@ -136,7 +136,7 @@ func TestDynamicClient(t *testing.T) { t.Fatalf("unexpected error when deleting pod: %v", err) } - list, err := client.Pods(ns.Name).List(api.ListOptions{}) + list, err := client.Core().Pods(ns.Name).List(api.ListOptions{}) if err != nil { t.Fatalf("unexpected error when listing pods: %v", err) } diff --git a/test/integration/configmap/configmap_test.go b/test/integration/configmap/configmap_test.go index f4ccf7e9166..468eee8f490 100644 --- a/test/integration/configmap/configmap_test.go +++ b/test/integration/configmap/configmap_test.go @@ -25,8 +25,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" ) @@ -36,7 +36,7 @@ func TestConfigMap(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := 
clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("config-map", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -44,7 +44,7 @@ func TestConfigMap(t *testing.T) { DoTestConfigMap(t, client, ns) } -func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) { +func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace) { cfg := api.ConfigMap{ ObjectMeta: api.ObjectMeta{ Name: "configmap", @@ -57,7 +57,7 @@ func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) { }, } - if _, err := client.ConfigMaps(cfg.Namespace).Create(&cfg); err != nil { + if _, err := client.Core().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil { t.Errorf("unable to create test configMap: %v", err) } defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name) @@ -112,14 +112,14 @@ func DoTestConfigMap(t *testing.T, client *client.Client, ns *api.Namespace) { } pod.ObjectMeta.Name = "uses-configmap" - if _, err := client.Pods(ns.Name).Create(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) } -func deleteConfigMapOrErrorf(t *testing.T, c *client.Client, ns, name string) { - if err := c.ConfigMaps(ns).Delete(name); err != nil { +func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { + if err := c.Core().ConfigMaps(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete ConfigMap %v: %v", name, err) } } diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 16e50d29ddb..bd0a6c0a7a1 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -49,7 +49,6 @@ import ( coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/controller" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/generated/openapi" @@ -90,7 +89,7 @@ type MasterComponents struct { // Kubernetes master, contains an embedded etcd storage KubeMaster *master.Master // Restclient used to talk to the kubernetes master - RestClient *client.Client + ClientSet clientset.Interface // Replication controller manager ControllerManager *replicationcontroller.ReplicationManager // Channel for stop signals to rc manager @@ -117,7 +116,6 @@ func NewMasterComponents(c *Config) *MasterComponents { // TODO: Allow callers to pipe through a different master url and create a client/start components using it. glog.Infof("Master %+v", s.URL) // TODO: caesarxuchao: remove this client when the refactoring of client libraray is done. 
- restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst}) rcStopCh := make(chan struct{}) controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096) @@ -130,7 +128,7 @@ func NewMasterComponents(c *Config) *MasterComponents { return &MasterComponents{ ApiServer: s, KubeMaster: m, - RestClient: restClient, + ClientSet: clientset, ControllerManager: controllerManager, rcStopCh: rcStopCh, } diff --git a/test/integration/master/master_benchmark_test.go b/test/integration/master/master_benchmark_test.go index fb4d9de3df9..889b5e91499 100644 --- a/test/integration/master/master_benchmark_test.go +++ b/test/integration/master/master_benchmark_test.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/test/integration/framework" @@ -98,7 +98,7 @@ func getIterations(bN int) int { } // startPodsOnNodes creates numPods sharded across numNodes -func startPodsOnNodes(ns string, numPods, numNodes int, restClient *client.Client) { +func startPodsOnNodes(ns string, numPods, numNodes int, restClient clientset.Interface) { podsPerNode := numPods / numNodes if podsPerNode < 1 { podsPerNode = 1 @@ -137,7 +137,7 @@ func BenchmarkPodList(b *testing.B) { defer func() { glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now)) }() - if pods, err := m.RestClient.Pods(ns.Name).List(api.ListOptions{ + if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{ LabelSelector: labels.Everything(), FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host), }); err != nil { @@ -180,7 +180,7 @@ func BenchmarkPodListEtcd(b *testing.B) { defer func() { glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now)) }() - pods, err := m.RestClient.Pods(ns.Name).List(api.ListOptions{ + pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{ LabelSelector: labels.Everything(), FieldSelector: fields.Everything(), }) diff --git a/test/integration/master/master_test.go b/test/integration/master/master_test.go index f258a16ef51..e38f1921df8 100644 --- a/test/integration/master/master_test.go +++ b/test/integration/master/master_test.go @@ -36,8 +36,8 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" @@ -387,10 +387,10 @@ func TestMasterService(t *testing.T) { _, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig()) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := 
clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) err := wait.Poll(time.Second, time.Minute, func() (bool, error) { - svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{}) + svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{}) if err != nil { t.Errorf("unexpected error: %v", err) return false, nil @@ -403,7 +403,7 @@ func TestMasterService(t *testing.T) { } } if found { - ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes") + ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { return false, nil } @@ -429,7 +429,7 @@ func TestServiceAlloc(t *testing.T) { _, s := framework.RunAMaster(cfg) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) svc := func(i int) *api.Service { return &api.Service{ @@ -447,7 +447,7 @@ func TestServiceAlloc(t *testing.T) { // Wait until the default "kubernetes" service is created. if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.Services(api.NamespaceDefault).Get("kubernetes") + _, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes") if err != nil && !errors.IsNotFound(err) { return false, err } @@ -457,17 +457,17 @@ func TestServiceAlloc(t *testing.T) { } // Make a service. - if _, err := client.Services(api.NamespaceDefault).Create(svc(1)); err != nil { + if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(1)); err != nil { t.Fatalf("got unexpected error: %v", err) } // Make a second service. It will fail because we're out of cluster IPs - if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil { + if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(2)); err != nil { if !strings.Contains(err.Error(), "range is full") { t.Errorf("unexpected error text: %v", err) } } else { - svcs, err := client.Services(api.NamespaceAll).List(api.ListOptions{}) + svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{}) if err != nil { t.Fatalf("unexpected success, and error getting the services: %v", err) } @@ -479,12 +479,12 @@ func TestServiceAlloc(t *testing.T) { } // Delete the first service. - if err := client.Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name); err != nil { + if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil { t.Fatalf("got unexpected error: %v", err) } // This time creating the second service should work. 
- if _, err := client.Services(api.NamespaceDefault).Create(svc(2)); err != nil { + if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(2)); err != nil { t.Fatalf("got unexpected error: %v", err) } } diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index f5d499b1daf..eac3a2f5545 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/integration/framework" "github.com/golang/glog" @@ -108,8 +108,8 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - if _, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + if _, err := client.Core().Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/objectmeta/objectmeta_test.go b/test/integration/objectmeta/objectmeta_test.go index f8670ad905c..54bc0c4e9b0 100644 --- a/test/integration/objectmeta/objectmeta_test.go +++ b/test/integration/objectmeta/objectmeta_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/genericapiserver" etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/storage/etcd/etcdtest" @@ -38,7 +38,7 @@ func TestIgnoreClusterName(t *testing.T) { _, s := framework.RunAMaster(config) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) etcdClient := framework.NewEtcdClient() etcdStorage := etcdstorage.NewEtcdStorage(etcdClient, testapi.Default.Codec(), prefix+"/namespaces/", false, etcdtest.DeserializationCacheSize) @@ -50,7 +50,7 @@ func TestIgnoreClusterName(t *testing.T) { ClusterName: "cluster-name-to-ignore", }, } - nsNew, err := client.Namespaces().Create(&ns) + nsNew, err := client.Core().Namespaces().Create(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) @@ -61,7 +61,7 @@ func TestIgnoreClusterName(t *testing.T) { assert.Equal(t, ns.Name, nsEtcd.Name) assert.Empty(t, nsEtcd.ClusterName) - nsNew, err = client.Namespaces().Update(&ns) + nsNew, err = client.Core().Namespaces().Update(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) 
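Before the pods_test.go hunks below, the same two integration-test changes repeat: the test client is now built with clientset.NewForConfigOrDie, and namespaced Delete calls pass explicit options. A minimal sketch, assuming illustrative helper names and a host argument supplied by the test framework (neither is part of this patch):

package integrationsketch // illustrative package name, not from this patch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newTestClient mirrors the construction used throughout these tests: the
// apiserver URL comes from the test framework, the group version from the
// registered internal API group.
func newTestClient(host string) clientset.Interface {
	return clientset.NewForConfigOrDie(&restclient.Config{
		Host:          host,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
	})
}

// deleteConfigMap shows the Delete signature change: the generated clientset
// takes explicit *api.DeleteOptions, and passing nil keeps the old default behaviour.
func deleteConfigMap(c clientset.Interface, ns, name string) error {
	return c.Core().ConfigMaps(ns).Delete(name, nil)
}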
diff --git a/test/integration/pods/pods_test.go b/test/integration/pods/pods_test.go index 2fd230575f4..5d92bf21aae 100644 --- a/test/integration/pods/pods_test.go +++ b/test/integration/pods/pods_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" ) @@ -37,7 +37,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) { ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) var ( iZero = int64(0) @@ -130,13 +130,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) { pod.Spec.ActiveDeadlineSeconds = tc.original pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i) - if _, err := client.Pods(ns.Name).Create(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } pod.Spec.ActiveDeadlineSeconds = tc.update - _, err := client.Pods(ns.Name).Update(pod) + _, err := client.Core().Pods(ns.Name).Update(pod) if tc.valid && err != nil { t.Errorf("%v: failed to update pod: %v", tc.name, err) } else if !tc.valid && err == nil { @@ -155,7 +155,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { ns := framework.CreateTestingNamespace("pod-readonly-root", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -174,7 +174,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { }, } - if _, err := client.Pods(ns.Name).Create(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 6ffad60815a..6b289aaba73 100644 --- a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -37,7 +37,6 @@ import ( unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/scheduler" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" @@ -197,7 +196,6 @@ func TestSchedulerExtender(t *testing.T) { ns := framework.CreateTestingNamespace("scheduler-extender", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion:
&registered.GroupOrDie(api.GroupName).GroupVersion}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) extender1 := &Extender{ @@ -252,13 +250,13 @@ func TestSchedulerExtender(t *testing.T) { defer close(schedulerConfig.StopEverything) - DoTestPodScheduling(ns, t, restClient) + DoTestPodScheduling(ns, t, clientSet) } -func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Client) { +func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{}) + defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{}) goodCondition := api.NodeCondition{ Type: api.NodeReady, @@ -278,7 +276,7 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Cli for ii := 0; ii < 5; ii++ { node.Name = fmt.Sprintf("machine%d", ii+1) - if _, err := restClient.Nodes().Create(node); err != nil { + if _, err := cs.Core().Nodes().Create(node); err != nil { t.Fatalf("Failed to create nodes: %v", err) } } @@ -286,21 +284,21 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Cli pod := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"}, Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}}, + Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}}, }, } - myPod, err := restClient.Pods(ns.Name).Create(pod) + myPod, err := cs.Core().Pods(ns.Name).Create(pod) if err != nil { t.Fatalf("Failed to create pod: %v", err) } - err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name)) + err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name)) if err != nil { t.Fatalf("Failed to schedule pod: %v", err) } - if myPod, err := restClient.Pods(ns.Name).Get(myPod.Name); err != nil { + if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name); err != nil { t.Fatalf("Failed to get pod: %v", err) } else if myPod.Spec.NodeName != "machine3" { t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName) diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index a5867f5bb13..58523bf27fc 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -32,9 +32,9 @@ import ( "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/scheduler" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" @@ -43,7 +43,7 @@ import ( "k8s.io/kubernetes/test/integration/framework" ) -type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c *client.Client) +type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c clientset.Interface) type nodeStateManager struct { makeSchedulable
nodeMutationFunc @@ -57,7 +57,6 @@ func TestUnschedulableNodes(t *testing.T) { ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) @@ -67,17 +66,17 @@ func TestUnschedulableNodes(t *testing.T) { } eventBroadcaster := record.NewBroadcaster() schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName}) - eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name)) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)}) scheduler.New(schedulerConfig).Run() defer close(schedulerConfig.StopEverything) - DoTestUnschedulableNodes(t, restClient, ns, schedulerConfigFactory.NodeLister.Store) + DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.NodeLister.Store) } -func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc { +func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.Pods(podNamespace).Get(podName) + pod, err := c.Core().Pods(podNamespace).Get(podName) if errors.IsNotFound(err) { return false, nil } @@ -121,10 +120,10 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n return err } -func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.Namespace, nodeStore cache.Store) { +func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Namespace, nodeStore cache.Store) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes).
- defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{}) + defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{}) goodCondition := api.NodeCondition{ Type: api.NodeReady, @@ -167,9 +166,9 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N nodeModifications := []nodeStateManager{ // Test node.Spec.Unschedulable=true/false { - makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) { + makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) { n.Spec.Unschedulable = true - if _, err := c.Nodes().Update(n); err != nil { + if _, err := c.Core().Nodes().Update(n); err != nil { t.Fatalf("Failed to update node with unschedulable=true: %v", err) } err = waitForReflection(t, s, nodeKey, func(node interface{}) bool { @@ -183,9 +182,9 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err) } }, - makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) { + makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) { n.Spec.Unschedulable = false - if _, err := c.Nodes().Update(n); err != nil { + if _, err := c.Core().Nodes().Update(n); err != nil { t.Fatalf("Failed to update node with unschedulable=false: %v", err) } err = waitForReflection(t, s, nodeKey, func(node interface{}) bool { @@ -198,14 +197,14 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N }, // Test node.Status.Conditions=ConditionTrue/Unknown { - makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) { + makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) { n.Status = api.NodeStatus{ Capacity: api.ResourceList{ api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), }, Conditions: []api.NodeCondition{badCondition}, } - if _, err = c.Nodes().UpdateStatus(n); err != nil { + if _, err = c.Core().Nodes().UpdateStatus(n); err != nil { t.Fatalf("Failed to update node with bad status condition: %v", err) } err = waitForReflection(t, s, nodeKey, func(node interface{}) bool { @@ -215,14 +214,14 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N t.Fatalf("Failed to observe reflected update for status condition update: %v", err) } }, - makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) { + makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) { n.Status = api.NodeStatus{ Capacity: api.ResourceList{ api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), }, Conditions: []api.NodeCondition{goodCondition}, } - if _, err = c.Nodes().UpdateStatus(n); err != nil { + if _, err = c.Core().Nodes().UpdateStatus(n); err != nil { t.Fatalf("Failed to update node with healthy status condition: %v", err) } err = waitForReflection(t, s, nodeKey, func(node interface{}) bool { @@ -236,29 +235,29 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N } for i, mod := range nodeModifications { - unSchedNode, err := restClient.Nodes().Create(node) + unSchedNode, err := cs.Core().Nodes().Create(node) if err != nil { t.Fatalf("Failed to create node: %v", err) } // Apply the unschedulable modification to the node, and wait for the reflection - mod.makeUnSchedulable(t, unSchedNode, nodeStore, restClient) + mod.makeUnSchedulable(t, unSchedNode, nodeStore, 
cs) // Create the new pod, note that this needs to happen post unschedulable // modification or we have a race in the test. pod := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"}, Spec: api.PodSpec{ - Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}}, + Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}}, }, } - myPod, err := restClient.Pods(ns.Name).Create(pod) + myPod, err := cs.Core().Pods(ns.Name).Create(pod) if err != nil { t.Fatalf("Failed to create pod: %v", err) } // There are no schedulable nodes - the pod shouldn't be scheduled. - err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name)) + err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name)) if err == nil { t.Errorf("Pod scheduled successfully on unschedulable nodes") } @@ -269,25 +268,25 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.N } // Apply the schedulable modification to the node, and wait for the reflection - schedNode, err := restClient.Nodes().Get(unSchedNode.Name) + schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name) if err != nil { t.Fatalf("Failed to get node: %v", err) } - mod.makeSchedulable(t, schedNode, nodeStore, restClient) + mod.makeSchedulable(t, schedNode, nodeStore, cs) // Wait until the pod is scheduled. - err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name)) + err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name)) if err != nil { t.Errorf("Test %d: failed to schedule a pod: %v", i, err) } else { t.Logf("Test %d: Pod got scheduled on a schedulable node", i) } - err = restClient.Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0)) + err = cs.Core().Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0)) if err != nil { t.Errorf("Failed to delete pod: %v", err) } - err = restClient.Nodes().Delete(schedNode.Name) + err = cs.Core().Nodes().Delete(schedNode.Name, nil) if err != nil { t.Errorf("Failed to delete node: %v", err) } @@ -323,12 +322,11 @@ func TestMultiScheduler(t *testing.T) { - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled */ // 1. create and start default-scheduler - restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes).
- defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{}) + defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{}) schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() @@ -337,7 +335,7 @@ func TestMultiScheduler(t *testing.T) { } eventBroadcaster := record.NewBroadcaster() schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName}) - eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name)) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)}) scheduler.New(schedulerConfig).Run() // default-scheduler will be stopped later @@ -351,25 +349,25 @@ func TestMultiScheduler(t *testing.T) { }, }, } - restClient.Nodes().Create(node) + clientSet.Core().Nodes().Create(node) // 3. create 3 pods for testing - podWithNoAnnotation := createPod(restClient, "pod-with-no-annotation", nil) - testPodNoAnnotation, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation) + podWithNoAnnotation := createPod(clientSet, "pod-with-no-annotation", nil) + testPodNoAnnotation, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation) if err != nil { t.Fatalf("Failed to create pod: %v", err) } schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"} - podWithAnnotationFitsDefault := createPod(restClient, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault) - testPodWithAnnotationFitsDefault, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault) + podWithAnnotationFitsDefault := createPod(clientSet, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault) + testPodWithAnnotationFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault) if err != nil { t.Fatalf("Failed to create pod: %v", err) } schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"} - podWithAnnotationFitsFoo := createPod(restClient, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo) - testPodWithAnnotationFitsFoo, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsFoo) + podWithAnnotationFitsFoo := createPod(clientSet, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo) + testPodWithAnnotationFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsFoo) if err != nil { t.Fatalf("Failed to create pod: %v", err) } @@ -377,21 +375,21 @@ func TestMultiScheduler(t *testing.T) { // 4. 
**check point-1**: // - testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled // - testPodWithAnnotationFitsFoo should NOT be scheduled - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name)) if err != nil { t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodNoAnnotation.Name, err) } else { t.Logf("Test MultiScheduler: %s Pod scheduled", testPodNoAnnotation.Name) } - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name)) if err != nil { t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodWithAnnotationFitsDefault.Name, err) } else { t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault.Name) } - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name)) if err == nil { t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsFoo.Name, err) } else { @@ -399,7 +397,6 @@ func TestMultiScheduler(t *testing.T) { } // 5. create and start a scheduler with name "foo-scheduler" - restClient2 := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) @@ -409,14 +406,14 @@ func TestMultiScheduler(t *testing.T) { } eventBroadcaster2 := record.NewBroadcaster() schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"}) - eventBroadcaster2.StartRecordingToSink(restClient2.Events(ns.Name)) + eventBroadcaster2.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)}) scheduler.New(schedulerConfig2).Run() defer close(schedulerConfig2.StopEverything) // 6. **check point-2**: // - testPodWithAnnotationFitsFoo should be scheduled - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name)) if err != nil { t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodWithAnnotationFitsFoo.Name, err) } else { @@ -424,11 +421,11 @@ func TestMultiScheduler(t *testing.T) { } // 7.
delete the pods that were scheduled by the default scheduler, and stop the default scheduler - err = restClient.Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0)) + err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0)) if err != nil { t.Errorf("Failed to delete pod: %v", err) } - err = restClient.Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0)) + err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0)) if err != nil { t.Errorf("Failed to delete pod: %v", err) } @@ -446,24 +443,24 @@ func TestMultiScheduler(t *testing.T) { // - note: these two pods belong to default scheduler which no longer exists podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil) podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault) - testPodNoAnnotation2, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation2) + testPodNoAnnotation2, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation2) if err != nil { t.Fatalf("Failed to create pod: %v", err) } - testPodWithAnnotationFitsDefault2, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault2) + testPodWithAnnotationFitsDefault2, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault2) if err != nil { t.Fatalf("Failed to create pod: %v", err) } // 9. **check point-3**: // - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name)) if err == nil { t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err) } else { t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name) } - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name)) if err == nil { t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err) } else { @@ -472,7 +469,7 @@ func TestMultiScheduler(t *testing.T) { */ } -func createPod(client *client.Client, name string, annotation map[string]string) *api.Pod { +func createPod(client clientset.Interface, name string, annotation map[string]string) *api.Pod { return &api.Pod{ ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation}, Spec: api.PodSpec{ @@ -490,12 +487,11 @@ func TestAllocatable(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) // 1. create and start default-scheduler - restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes).
- defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{}) + defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{}) schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() @@ -504,7 +500,7 @@ func TestAllocatable(t *testing.T) { } eventBroadcaster := record.NewBroadcaster() schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName}) - eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name)) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)}) scheduler.New(schedulerConfig).Run() // default-scheduler will be stopped later defer close(schedulerConfig.StopEverything) @@ -522,7 +518,7 @@ func TestAllocatable(t *testing.T) { }, } - allocNode, err := restClient.Nodes().Create(node) + allocNode, err := clientSet.Core().Nodes().Create(node) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -534,7 +530,7 @@ func TestAllocatable(t *testing.T) { Containers: []api.Container{ { Name: "container", - Image: e2e.GetPauseImageName(restClient), + Image: e2e.GetPauseImageName(clientSet), Resources: api.ResourceRequirements{ Requests: api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI), @@ -546,13 +542,13 @@ func TestAllocatable(t *testing.T) { }, } - testAllocPod, err := restClient.Pods(ns.Name).Create(podResource) + testAllocPod, err := clientSet.Core().Pods(ns.Name).Create(podResource) if err != nil { t.Fatalf("Test allocatable unawareness failed to create pod: %v", err) } // 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod.Namespace, testAllocPod.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod.Namespace, testAllocPod.Name)) if err != nil { t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err) } else { @@ -573,23 +569,23 @@ func TestAllocatable(t *testing.T) { }, } - if _, err := restClient.Nodes().UpdateStatus(allocNode); err != nil { + if _, err := clientSet.Core().Nodes().UpdateStatus(allocNode); err != nil { t.Fatalf("Failed to update node with Status.Allocatable: %v", err) } - if err := restClient.Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil { + if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil { t.Fatalf("Failed to remove first resource pod: %v", err) } // 6. Make another pod with different name, same resource request podResource.ObjectMeta.Name = "pod-test-allocatable2" - testAllocPod2, err := restClient.Pods(ns.Name).Create(podResource) + testAllocPod2, err := clientSet.Core().Pods(ns.Name).Create(podResource) if err != nil { t.Fatalf("Test allocatable awareness failed to create pod: %v", err) } // 7. 
Test: this test pod should not be scheduled since it request more than Allocatable - err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod2.Namespace, testAllocPod2.Name)) + err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name)) if err == nil { t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectly, %v", testAllocPod2.Name, err) } else { diff --git a/test/integration/secrets/secrets_test.go b/test/integration/secrets/secrets_test.go index fa51184eec0..e618dcd3d4e 100644 --- a/test/integration/secrets/secrets_test.go +++ b/test/integration/secrets/secrets_test.go @@ -25,14 +25,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" ) -func deleteSecretOrErrorf(t *testing.T, c *client.Client, ns, name string) { - if err := c.Secrets(ns).Delete(name); err != nil { +func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { + if err := c.Core().Secrets(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete secret %v: %v", name, err) } } @@ -42,7 +42,7 @@ func TestSecrets(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("secret", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -51,7 +51,7 @@ func TestSecrets(t *testing.T) { } // DoTestSecrets test secrets for one api version. -func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) { +func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace) { // Make a secret object. s := api.Secret{ ObjectMeta: api.ObjectMeta{ @@ -63,7 +63,7 @@ func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) { }, } - if _, err := client.Secrets(s.Namespace).Create(&s); err != nil { + if _, err := client.Core().Secrets(s.Namespace).Create(&s); err != nil { t.Errorf("unable to create test secret: %v", err) } defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name) @@ -103,14 +103,14 @@ func DoTestSecrets(t *testing.T, client *client.Client, ns *api.Namespace) { // Create a pod to consume secret. pod.ObjectMeta.Name = "uses-secret" - if _, err := client.Pods(ns.Name).Create(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) // Create a pod that consumes non-existent secret.
pod.ObjectMeta.Name = "uses-non-existent-secret" - if _, err := client.Pods(ns.Name).Create(pod); err != nil { + if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) diff --git a/test/integration/storageclasses/storage_classes_test.go b/test/integration/storageclasses/storage_classes_test.go index 85e4283aa53..cd0c9a99184 100644 --- a/test/integration/storageclasses/storage_classes_test.go +++ b/test/integration/storageclasses/storage_classes_test.go @@ -29,8 +29,8 @@ import ( "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/test/integration/framework" ) @@ -41,7 +41,7 @@ func TestStorageClasses(t *testing.T) { _, s := framework.RunAMaster(nil) defer s.Close() - client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) ns := framework.CreateTestingNamespace("storageclass", s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -50,7 +50,7 @@ func TestStorageClasses(t *testing.T) { } // DoTestStorageClasses tests storage classes for one api version. -func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace) { +func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Namespace) { // Make a storage class object.
s := storage.StorageClass{ TypeMeta: unversioned.TypeMeta{ @@ -83,20 +83,20 @@ func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace } pvc.ObjectMeta.Name = "uses-storageclass" - if _, err := client.PersistentVolumeClaims(ns.Name).Create(pvc); err != nil { + if _, err := client.Core().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil { t.Errorf("Failed to create pvc: %v", err) } defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name) } -func deleteStorageClassOrErrorf(t *testing.T, c *client.Client, ns, name string) { - if err := c.Storage().StorageClasses().Delete(name); err != nil { +func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { + if err := c.Storage().StorageClasses().Delete(name, nil); err != nil { t.Errorf("unable to delete storage class %v: %v", name, err) } } -func deletePersistentVolumeClaimOrErrorf(t *testing.T, c *client.Client, ns, name string) { - if err := c.PersistentVolumeClaims(ns).Delete(name); err != nil { +func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { + if err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete persistent volume claim %v: %v", name, err) } } diff --git a/test/integration/utils.go b/test/integration/utils.go index ec39fdcd43e..faaa43b675d 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -26,8 +26,8 @@ import ( "github.com/golang/glog" "golang.org/x/net/context" "k8s.io/kubernetes/pkg/api/errors" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/integration/framework" ) @@ -55,8 +55,8 @@ func withEtcdKey(f func(string)) { f(prefix) } -func DeletePodOrErrorf(t *testing.T, c *client.Client, ns, name string) { - if err := c.Pods(ns).Delete(name, nil); err != nil { +func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { + if err := c.Core().Pods(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete pod %v: %v", name, err) } } diff --git a/test/soak/cauldron/cauldron.go b/test/soak/cauldron/cauldron.go index a6fc37d6ff3..cd3ae29e9da 100644 --- a/test/soak/cauldron/cauldron.go +++ b/test/soak/cauldron/cauldron.go @@ -32,7 +32,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/util/intstr" ) @@ -59,14 +60,19 @@ func main() { glog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d", *queriesAverage, *podsPerNode, *upTo, *maxPar) - c, err := client.NewInCluster() + cc, err := restclient.InClusterConfig() + if err != nil { + glog.Fatalf("Failed to make client: %v", err) + } + + client, err := clientset.NewForConfig(cc) if err != nil { glog.Fatalf("Failed to make client: %v", err) } var nodes *api.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { - nodes, err = c.Nodes().List(api.ListOptions{}) + nodes, err = client.Core().Nodes().List(api.ListOptions{}) if err == nil { break } @@ -88,18 +94,18 @@ func main() { queries := *queriesAverage * len(nodes.Items) * *podsPerNode 
// Create a uniquely named namespace. - got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}}) + got, err := client.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { glog.Fatalf("Failed to create namespace: %v", err) } ns := got.Name defer func(ns string) { - if err := c.Namespaces().Delete(ns); err != nil { + if err := client.Core().Namespaces().Delete(ns, nil); err != nil { glog.Warningf("Failed to delete namespace ns: %e", ns, err) } else { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { - if _, err := c.Namespaces().Get(ns); err != nil { + if _, err := client.Core().Namespaces().Get(ns); err != nil { if errors.IsNotFound(err) { return } @@ -116,7 +122,7 @@ func main() { var svc *api.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { t := time.Now() - svc, err = c.Services(ns).Create(&api.Service{ + svc, err = client.Core().Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: "serve-hostnames", Labels: map[string]string{ @@ -149,7 +155,7 @@ func main() { glog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { - if err := c.Services(ns).Delete(svc.Name); err == nil { + if err := client.Core().Services(ns).Delete(svc.Name, nil); err == nil { return } glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) @@ -166,7 +172,7 @@ func main() { for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() - _, err = c.Pods(ns).Create(&api.Pod{ + _, err = client.Core().Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{ @@ -202,7 +208,7 @@ func main() { // Make several attempts to delete the pods. 
for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { - if err = c.Pods(ns).Delete(podName, nil); err == nil { + if err = client.Core().Pods(ns).Delete(podName, nil); err == nil { break } glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) @@ -214,7 +220,7 @@ func main() { for _, podName := range podNames { var pod *api.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { - pod, err = c.Pods(ns).Get(podName) + pod, err = client.Core().Pods(ns).Get(podName) if err != nil { glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 65efc6cf966..e170b08d668 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/intstr" @@ -82,7 +82,7 @@ func main() { glog.Fatalf("Failed to construct config: %v", err) } - c, err := client.New(config) + client, err := clientset.NewForConfig(config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } @@ -117,7 +117,7 @@ func main() { } ns := got.Name defer func(ns string) { - if err := c.Namespaces().Delete(ns); err != nil { + if err := client.Core().Namespaces().Delete(ns, nil); err != nil { glog.Warningf("Failed to delete namespace ns: %e", ns, err) } else { // wait until the namespace disappears diff --git a/test/utils/pod_store.go b/test/utils/pod_store.go index e6e044f4fe8..d7700ffdc01 100644 --- a/test/utils/pod_store.go +++ b/test/utils/pod_store.go @@ -19,7 +19,7 @@ package utils import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" - client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -33,17 +33,18 @@ type PodStore struct { Reflector *cache.Reflector } -func NewPodStore(c *client.Client, namespace string, label labels.Selector, field fields.Selector) *PodStore { +func NewPodStore(c clientset.Interface, namespace string, label labels.Selector, field fields.Selector) *PodStore { lw := &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = label options.FieldSelector = field - return c.Pods(namespace).List(options) + obj, err := c.Core().Pods(namespace).List(options) + return runtime.Object(obj), err }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = label options.FieldSelector = field - return c.Pods(namespace).Watch(options) + return c.Core().Pods(namespace).Watch(options) }, } store := cache.NewStore(cache.MetaNamespaceKeyFunc) diff --git a/test/utils/runners.go b/test/utils/runners.go index b23fac15080..9c9d1fc238b 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" clientset 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" @@ -43,7 +42,7 @@ const ( ) type RCConfig struct { - Client *client.Client + Client clientset.Interface Image string Command []string Name string @@ -91,8 +90,8 @@ type RCConfig struct { LogFunc func(fmt string, args ...interface{}) // If set those functions will be used to gather data from Nodes - in integration tests where no // kubelets are running those variables should be nil. - NodeDumpFunc func(c *client.Client, nodeNames []string, logFunc func(fmt string, args ...interface{})) - ContainerDumpFunc func(c *client.Client, ns string, logFunc func(ftm string, args ...interface{})) + NodeDumpFunc func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) + ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) } func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) { @@ -221,7 +220,7 @@ func (config *DeploymentConfig) create() error { config.applyTo(&deployment.Spec.Template) - _, err := config.Client.Deployments(config.Namespace).Create(deployment) + _, err := config.Client.Extensions().Deployments(config.Namespace).Create(deployment) if err != nil { return fmt.Errorf("Error creating deployment: %v", err) } @@ -273,7 +272,7 @@ func (config *ReplicaSetConfig) create() error { config.applyTo(&rs.Spec.Template) - _, err := config.Client.ReplicaSets(config.Namespace).Create(rs) + _, err := config.Client.Extensions().ReplicaSets(config.Namespace).Create(rs) if err != nil { return fmt.Errorf("Error creating replica set: %v", err) } @@ -330,7 +329,7 @@ func (config *RCConfig) create() error { config.applyTo(rc.Spec.Template) - _, err := config.Client.ReplicationControllers(config.Namespace).Create(rc) + _, err := config.Client.Core().ReplicationControllers(config.Namespace).Create(rc) if err != nil { return fmt.Errorf("Error creating replication controller: %v", err) } @@ -537,7 +536,7 @@ func (config *RCConfig) start() error { if oldRunning != config.Replicas { // List only pods from a given replication controller. options := api.ListOptions{LabelSelector: label} - if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil { + if pods, err := config.Client.Core().Pods(api.NamespaceAll).List(options); err == nil { for _, pod := range pods.Items { config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp) @@ -553,7 +552,7 @@ func (config *RCConfig) start() error { // Simplified version of RunRC, that does not create RC, but creates plain Pods. // Optionally waits for pods to start running (if waitForRunning == true). // The number of replicas must be non-zero. 
-func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, +func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error { // no pod to start if replicas < 1 { @@ -566,7 +565,7 @@ func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix s pod.ObjectMeta.Labels["name"] = podName pod.ObjectMeta.Labels["startPodsID"] = startPodsID pod.Spec.Containers[0].Name = podName - _, err := c.Pods(namespace).Create(&pod) + _, err := c.Core().Pods(namespace).Create(&pod) if err != nil { return err } @@ -584,7 +583,7 @@ func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix s // Wait up to 10 minutes for all matching pods to become Running and at least one // matching pod exists. -func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error { +func WaitForPodsWithLabelRunning(c clientset.Interface, ns string, label labels.Selector) error { running := false PodStore := NewPodStore(c, ns, label, fields.Everything()) defer PodStore.Stop()
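The PodStore change above highlights one wrinkle of the clientset migration: the typed List call returns *api.PodList, so the cache.ListWatch ListFunc has to hand it back as a runtime.Object explicitly. A condensed sketch of that wiring follows, assuming the same 1.5-era packages used in this diff; the helper name newPodListWatch is illustrative and not part of the patch.

package example // hypothetical helper, not part of this patch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// newPodListWatch builds a cache.ListWatch for pods over the generated
// clientset, mirroring the NewPodStore change: List returns a typed
// *api.PodList, which is converted back to runtime.Object for the reflector.
func newPodListWatch(c clientset.Interface, namespace string, label labels.Selector, field fields.Selector) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			obj, err := c.Core().Pods(namespace).List(options)
			return runtime.Object(obj), err
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.LabelSelector = label
			options.FieldSelector = field
			return c.Core().Pods(namespace).Watch(options)
		},
	}
}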