Replace client with clientset in code

Author: Jan Chaloupka, 2016-10-18 15:00:38 +02:00
parent 432bbb5d5a
commit 4fde09d308
137 changed files with 1762 additions and 1794 deletions
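
The change is mechanical but repeats across all 137 files: the deprecated monolithic client (client "k8s.io/kubernetes/pkg/client/unversioned") gives way to the generated clientset, which is built from a restclient.Config and reaches each API group through an accessor such as Core(), Batch(), or Extensions(). A minimal sketch of the resulting construction pattern, using only packages that appear in the diffs below:

package main

import (
    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
    // Step 1: build a rest config (in-cluster here; clientcmd works too).
    cc, err := restclient.InClusterConfig()
    if err != nil {
        glog.Fatalf("Failed to make client: %v", err)
    }
    // Step 2: construct the generated clientset from the config.
    client, err := clientset.NewForConfig(cc)
    if err != nil {
        glog.Fatalf("Failed to make client: %v", err)
    }
    // Step 3: resources hang off per-group accessors instead of the client
    // root: c.Pods(ns) becomes client.Core().Pods(ns), and so on.
    pods, err := client.Core().Pods(api.NamespaceSystem).List(api.ListOptions{})
    if err != nil {
        glog.Fatalf("Failed to list pods: %v", err)
    }
    glog.Infof("found %d pods", len(pods.Items))
}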


@@ -25,7 +25,8 @@ import (
     "github.com/golang/glog"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/client/restclient"
 )
 func flattenSubsets(subsets []api.EndpointSubset) []string {
@@ -42,14 +43,19 @@ func main() {
     flag.Parse()
     glog.Info("Kubernetes Elasticsearch logging discovery")
-    c, err := client.NewInCluster()
+    cc, err := restclient.InClusterConfig()
+    if err != nil {
+        glog.Fatalf("Failed to make client: %v", err)
+    }
+    client, err := clientset.NewForConfig(cc)
     if err != nil {
         glog.Fatalf("Failed to make client: %v", err)
     }
     namespace := api.NamespaceSystem
     envNamespace := os.Getenv("NAMESPACE")
     if envNamespace != "" {
-        if _, err := c.Namespaces().Get(envNamespace); err != nil {
+        if _, err := client.Core().Namespaces().Get(envNamespace); err != nil {
             glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
         }
         namespace = envNamespace
@@ -59,7 +65,7 @@ func main() {
     // Look for endpoints associated with the Elasticsearch loggging service.
     // First wait for the service to become available.
     for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-        elasticsearch, err = c.Services(namespace).Get("elasticsearch-logging")
+        elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging")
         if err == nil {
             break
         }
@@ -76,7 +82,7 @@ func main() {
     // Wait for some endpoints.
     count := 0
     for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-        endpoints, err = c.Endpoints(namespace).Get("elasticsearch-logging")
+        endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging")
         if err != nil {
             continue
         }


@@ -30,8 +30,9 @@ import (
     "k8s.io/kubernetes/cmd/kube-proxy/app/options"
     "k8s.io/kubernetes/pkg/api"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
     "k8s.io/kubernetes/pkg/client/record"
-    kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
     clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
     "k8s.io/kubernetes/pkg/proxy"
@@ -56,7 +57,7 @@ import (
 )
 type ProxyServer struct {
-    Client *kubeclient.Client
+    Client clientset.Interface
     Config *options.ProxyServerConfig
     IptInterface utiliptables.Interface
     Proxier proxy.ProxyProvider
@@ -82,7 +83,7 @@ func checkKnownProxyMode(proxyMode string) bool {
 }
 func NewProxyServer(
-    client *kubeclient.Client,
+    client clientset.Interface,
     config *options.ProxyServerConfig,
     iptInterface utiliptables.Interface,
     proxier proxy.ProxyProvider,
@@ -185,7 +186,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
     kubeconfig.QPS = config.KubeAPIQPS
     kubeconfig.Burst = int(config.KubeAPIBurst)
-    client, err := kubeclient.New(kubeconfig)
+    client, err := clientset.NewForConfig(kubeconfig)
     if err != nil {
         glog.Fatalf("Invalid API configuration: %v", err)
     }
@@ -198,7 +199,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
     var proxier proxy.ProxyProvider
     var endpointsHandler proxyconfig.EndpointsConfigHandler
-    proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
+    proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
     if proxyMode == proxyModeIPTables {
         glog.V(0).Info("Using iptables Proxier.")
         if config.IPTablesMasqueradeBit == nil {
@@ -251,7 +252,7 @@ func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, err
     endpointsConfig.RegisterHandler(endpointsHandler)
     proxyconfig.NewSourceAPI(
-        client,
+        client.Core().RESTClient(),
         config.ConfigSyncPeriod,
         serviceConfig.Channel("api"),
         endpointsConfig.Channel("api"),
@@ -281,7 +282,7 @@ func (s *ProxyServer) Run() error {
         return nil
     }
-    s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
+    s.Broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: s.Client.Core().Events("")})
     // Start up a webserver if requested
     if s.Config.HealthzPort > 0 {
@@ -418,9 +419,9 @@ func (s *ProxyServer) birthCry() {
     s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
 }
-func getNodeIP(client *kubeclient.Client, hostname string) net.IP {
+func getNodeIP(client clientset.Interface, hostname string) net.IP {
     var nodeIP net.IP
-    node, err := client.Nodes().Get(hostname)
+    node, err := client.Core().Nodes().Get(hostname)
     if err != nil {
         glog.Warningf("Failed to retrieve node info: %v", err)
         return nil

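One non-mechanical wrinkle in the kube-proxy diff above: the clientset's typed events client does not itself satisfy record.EventSink, so recording now goes through the EventSinkImpl adapter from the generated typed core package. A hedged sketch of the wiring, assuming the record and unversionedcore imports added in that file:

// Sketch: hooking an event broadcaster up to a clientset.
// `client` is a clientset.Interface as constructed in NewProxyServerDefault.
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(
    // EventSinkImpl adapts the typed Events client to record.EventSink.
    &unversionedcore.EventSinkImpl{Interface: client.Core().Events("")},
)
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy"})
_ = recorder // handed to the ProxyServer in the real code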

@@ -24,7 +24,6 @@ import (
     _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/client/restclient"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
     cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
     "k8s.io/kubernetes/pkg/kubelet/cm"
@@ -94,10 +93,7 @@ func main() {
     if err != nil {
         glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
     }
-    cl, err := client.New(clientConfig)
-    if err != nil {
-        glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
-    }
     clientset, err := internalclientset.NewForConfig(clientConfig)
     if err != nil {
         glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
@@ -136,7 +132,7 @@ func main() {
         endpointsConfig := proxyconfig.NewEndpointsConfig()
         endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
-        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
+        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, clientset, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
         hollowProxy.Run()
     }
 }


@@ -26,8 +26,8 @@ import (
     federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
     fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
     "k8s.io/kubernetes/pkg/api"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/restclient"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
     clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
     utilnet "k8s.io/kubernetes/pkg/util/net"
@@ -102,14 +102,18 @@ var KubeconfigGetterForSecret = func(secretName string) clientcmd.KubeconfigGett
         return nil, fmt.Errorf("unexpected: POD_NAMESPACE env var returned empty string")
     }
     // Get a client to talk to the k8s apiserver, to fetch secrets from it.
-    client, err := client.NewInCluster()
+    cc, err := restclient.InClusterConfig()
+    if err != nil {
+        return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
+    }
+    client, err := clientset.NewForConfig(cc)
     if err != nil {
         return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
     }
     data = []byte{}
     var secret *api.Secret
     err = wait.PollImmediate(1*time.Second, getSecretTimeout, func() (bool, error) {
-        secret, err = client.Secrets(namespace).Get(secretName)
+        secret, err = client.Core().Secrets(namespace).Get(secretName)
         if err == nil {
             return true, nil
         }


@@ -25,8 +25,8 @@ import (
     "k8s.io/kubernetes/pkg/api/testapi"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apimachinery/registered"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/restclient"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/fields"
     utiltesting "k8s.io/kubernetes/pkg/util/testing"
 )
@@ -98,8 +98,8 @@ func TestListWatchesCanList(t *testing.T) {
     }
     server := httptest.NewServer(&handler)
     defer server.Close()
-    client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-    lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector)
+    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+    lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector)
     // This test merely tests that the correct request is made.
     lw.List(api.ListOptions{})
     handler.ValidateRequest(t, item.location, "GET", nil)
@@ -164,8 +164,8 @@ func TestListWatchesCanWatch(t *testing.T) {
     }
     server := httptest.NewServer(&handler)
     defer server.Close()
-    client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-    lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector)
+    client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+    lw := NewListWatchFromClient(client.Core().RESTClient(), item.resource, item.namespace, item.fieldSelector)
     // This test merely tests that the correct request is made.
     lw.Watch(api.ListOptions{ResourceVersion: item.rv})
     handler.ValidateRequest(t, item.location, "GET", nil)

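NewListWatchFromClient wants the low-level REST client (the cache package's Getter), which the old *client.Client satisfied directly; a clientset does not, so the two test hunks above now reach through Core().RESTClient(). A hedged sketch of the same pattern from outside the cache package:

// Sketch: building a pods ListWatch from a generated clientset.
c := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL})
lw := cache.NewListWatchFromClient(
    c.Core().RESTClient(), // the RESTClient is the Getter the ListWatch needs
    "pods", api.NamespaceAll, fields.Everything(),
)
_, _ = lw.List(api.ListOptions{}) // issues GET .../namespaces/.../pods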

@@ -22,8 +22,8 @@ import (
     proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app"
     "k8s.io/kubernetes/cmd/kube-proxy/app/options"
     "k8s.io/kubernetes/pkg/api"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/record"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
     "k8s.io/kubernetes/pkg/types"
     "k8s.io/kubernetes/pkg/util"
@@ -51,7 +51,7 @@ func (*FakeProxier) SyncLoop() {
 func NewHollowProxyOrDie(
     nodeName string,
-    client *client.Client,
+    client clientset.Interface,
     endpointsConfig *proxyconfig.EndpointsConfig,
     serviceConfig *proxyconfig.ServiceConfig,
     iptInterface utiliptables.Interface,
@@ -69,7 +69,7 @@ func NewHollowProxyOrDie(
         Namespace: "",
     }
     proxyconfig.NewSourceAPI(
-        client,
+        client.Core().RESTClient(),
         30*time.Second,
         serviceConfig.Channel("api"),
         endpointsConfig.Channel("api"),


@@ -36,7 +36,7 @@ func parseApiServerMetrics(data string) (ApiServerMetrics, error) {
 }
 func (g *MetricsGrabber) getMetricsFromApiServer() (string, error) {
-    rawOutput, err := g.client.Get().RequestURI("/metrics").Do().Raw()
+    rawOutput, err := g.client.Core().RESTClient().Get().RequestURI("/metrics").Do().Raw()
     if err != nil {
         return "", err
     }


@@ -99,7 +99,7 @@ func parseMetrics(data string, output *Metrics) error {
 }
 func (g *MetricsGrabber) getMetricsFromPod(podName string, namespace string, port int) (string, error) {
-    rawOutput, err := g.client.Get().
+    rawOutput, err := g.client.Core().RESTClient().Get().
         Prefix("proxy").
         Namespace(namespace).
         Resource("pods").


@@ -65,7 +65,7 @@ func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (s
     var err error
     var rawOutput []byte
     go func() {
-        rawOutput, err = g.client.Get().
+        rawOutput, err = g.client.Core().RESTClient().Get().
             Prefix("proxy").
             Resource("nodes").
             Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).


@@ -21,7 +21,7 @@ import (
     "time"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/master/ports"
     "k8s.io/kubernetes/pkg/util/system"
@@ -41,7 +41,7 @@ type MetricsCollection struct {
 }
 type MetricsGrabber struct {
-    client *client.Client
+    client clientset.Interface
     grabFromApiServer bool
     grabFromControllerManager bool
     grabFromKubelets bool
@@ -50,10 +50,10 @@ type MetricsGrabber struct {
     registeredMaster bool
 }
-func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
+func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
     registeredMaster := false
     masterName := ""
-    nodeList, err := c.Nodes().List(api.ListOptions{})
+    nodeList, err := c.Core().Nodes().List(api.ListOptions{})
     if err != nil {
         return nil, err
     }
@@ -85,7 +85,7 @@ func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controll
 }
 func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
-    nodes, err := g.client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector()})
+    nodes, err := g.client.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector()})
     if err != nil {
         return KubeletMetrics{}, err
     }
@@ -166,7 +166,7 @@ func (g *MetricsGrabber) Grab() (MetricsCollection, error) {
     }
     if g.grabFromKubelets {
         result.KubeletMetrics = make(map[string]KubeletMetrics)
-        nodes, err := g.client.Nodes().List(api.ListOptions{})
+        nodes, err := g.client.Core().Nodes().List(api.ListOptions{})
         if err != nil {
             errs = append(errs, err)
         } else {

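Endpoints with no typed wrapper, such as /metrics, are still fetched through the underlying REST request builder, now reached via Core().RESTClient() as in the three metrics files above. A hedged sketch of such a raw request:

// Sketch: a raw GET against the apiserver's /metrics endpoint,
// mirroring getMetricsFromApiServer above; c is a clientset.Interface.
func scrapeApiServerMetrics(c clientset.Interface) (string, error) {
    raw, err := c.Core().RESTClient().Get().RequestURI("/metrics").Do().Raw()
    if err != nil {
        return "", err
    }
    return string(raw), nil
}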

@@ -26,7 +26,7 @@ import (
     "golang.org/x/crypto/ssh"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
     . "github.com/onsi/ginkgo"
@@ -275,8 +275,8 @@ var _ = framework.KubeDescribe("Addon update", func() {
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1))
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1))
-        waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", true)
-        waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", true)
+        waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", true)
+        waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", true)
         By("update manifests")
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2))
@@ -289,38 +289,38 @@ var _ = framework.KubeDescribe("Addon update", func() {
          * But it is ok - as long as we don't have rolling update, the result will be the same
          */
-        waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", true)
-        waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", true)
-        waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", false)
-        waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", false)
+        waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", true)
+        waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", true)
+        waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test", false)
+        waitForReplicationControllerInAddonTest(f.ClientSet, defaultNsName, "addon-test-v1", false)
         By("remove manifests")
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
         sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))
-        waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", false)
-        waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", false)
+        waitForServiceInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-updated", false)
+        waitForReplicationControllerInAddonTest(f.ClientSet, f.Namespace.Name, "addon-test-v2", false)
         By("verify invalid API addons weren't created")
-        _, err = f.Client.ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1")
+        _, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1")
         Expect(err).To(HaveOccurred())
-        _, err = f.Client.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1")
+        _, err = f.ClientSet.Core().ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1")
         Expect(err).To(HaveOccurred())
-        _, err = f.Client.Services(f.Namespace.Name).Get("ivalid-addon-test")
+        _, err = f.ClientSet.Core().Services(f.Namespace.Name).Get("ivalid-addon-test")
         Expect(err).To(HaveOccurred())
-        _, err = f.Client.Services(defaultNsName).Get("ivalid-addon-test")
+        _, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test")
         Expect(err).To(HaveOccurred())
         // invalid addons will be deleted by the deferred function
     })
 })
-func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
+func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
     framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
 }
-func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
+func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
     framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
 }

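A side benefit of widening helper signatures from *client.Client to clientset.Interface, as in the two wait helpers above, is that the generated fake clientset satisfies the same interface, so such helpers can also be driven in unit tests without a live apiserver. A hedged sketch (the fake package path is the 1.5-era generated one; serviceExists is a hypothetical helper for illustration):

package example

import (
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

// serviceExists works against a real clientset and the fake alike.
func serviceExists(c clientset.Interface, ns, name string) bool {
    _, err := c.Core().Services(ns).Get(name)
    return err == nil
}

func demo() bool {
    // The fake starts empty, so this reports false.
    return serviceExists(fake.NewSimpleClientset(), "default", "addon-test")
}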

@@ -22,7 +22,7 @@ import (
     "time"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
@@ -97,7 +97,7 @@ cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can
 func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
     requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-    runServiceAndWorkloadForResourceConsumer(f.Client, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+    runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
     rc := &ResourceConsumer{
         name: name,
         controllerName: name + "-ctrl",
@@ -199,7 +199,7 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
 }
 func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
-    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
     framework.ExpectNoError(err)
     req := proxyRequest.Namespace(rc.framework.Namespace.Name).
         Name(rc.controllerName).
@@ -214,7 +214,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 // sendConsumeMemRequest sends POST request for memory consumption
 func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
-    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
     framework.ExpectNoError(err)
     req := proxyRequest.Namespace(rc.framework.Namespace.Name).
         Name(rc.controllerName).
@@ -229,7 +229,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 // sendConsumeCustomMetric sends POST request for custom metric consumption
 func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
-    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
+    proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
     framework.ExpectNoError(err)
     req := proxyRequest.Namespace(rc.framework.Namespace.Name).
         Name(rc.controllerName).
@@ -246,21 +246,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 func (rc *ResourceConsumer) GetReplicas() int {
     switch rc.kind {
     case kindRC:
-        replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
+        replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
         framework.ExpectNoError(err)
         if replicationController == nil {
             framework.Failf(rcIsNil)
         }
         return int(replicationController.Status.Replicas)
     case kindDeployment:
-        deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name)
+        deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name)
         framework.ExpectNoError(err)
         if deployment == nil {
             framework.Failf(deploymentIsNil)
         }
         return int(deployment.Status.Replicas)
     case kindReplicaSet:
-        rs, err := rc.framework.Client.ReplicaSets(rc.framework.Namespace.Name).Get(rc.name)
+        rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name)
         framework.ExpectNoError(err)
         if rs == nil {
             framework.Failf(rsIsNil)
@@ -303,15 +303,15 @@ func (rc *ResourceConsumer) CleanUp() {
     rc.stopCustomMetric <- 0
     // Wait some time to ensure all child goroutines are finished.
     time.Sleep(10 * time.Second)
-    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
-    framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
-    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
-    framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.controllerName))
+    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
+    framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
+    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
+    framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
 }
-func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
     By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
-    _, err := c.Services(ns).Create(&api.Service{
+    _, err := c.Core().Services(ns).Create(&api.Service{
         ObjectMeta: api.ObjectMeta{
             Name: name,
         },
@@ -364,7 +364,7 @@ func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind s
     By(fmt.Sprintf("Running controller"))
     controllerName := name + "-ctrl"
-    _, err = c.Services(ns).Create(&api.Service{
+    _, err = c.Core().Services(ns).Create(&api.Service{
         ObjectMeta: api.ObjectMeta{
             Name: controllerName,
         },

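Note the extra argument that appears in the CleanUp hunk above: the clientset's Delete methods take a *api.DeleteOptions in addition to the name, where the old client's Services(ns).Delete took only the name; passing nil keeps the default deletion behavior. A hedged sketch (deleteConsumerServices is a hypothetical helper; the "-ctrl" suffix mirrors controllerName above):

// Sketch: deleting the consumer's services with the clientset API.
func deleteConsumerServices(c clientset.Interface, ns, name string) error {
    // Old client:  c.Services(ns).Delete(name)
    // Clientset:   Delete(name, *api.DeleteOptions); nil means defaults.
    if err := c.Core().Services(ns).Delete(name, nil); err != nil {
        return err
    }
    return c.Core().Services(ns).Delete(name+"-ctrl", nil)
}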

@@ -26,7 +26,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/errors"
     "k8s.io/kubernetes/pkg/apis/batch"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/kubectl"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util/wait"
@@ -54,11 +54,11 @@ var _ = framework.KubeDescribe("V1Job", func() {
     It("should run a job to completion when tasks succeed", func() {
         By("Creating a job")
         job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job reaches completions")
-        err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
+        err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -73,11 +73,11 @@ var _ = framework.KubeDescribe("V1Job", func() {
         // due to successive failures too likely with a reasonable
         // test timeout.
         job := newTestV1Job("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job reaches completions")
-        err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
+        err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -91,23 +91,23 @@ var _ = framework.KubeDescribe("V1Job", func() {
         // run due to some slowness, 1 in 2^15 chance of happening,
         // causing test flake. Should be very rare.
         job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job reaches completions")
-        err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
+        err = waitForV1JobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
         Expect(err).NotTo(HaveOccurred())
     })
     It("should keep restarting failed pods", func() {
         By("Creating a job")
         job := newTestV1Job("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job shows many failures")
         err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
-            curr, err := getV1Job(f.Client, f.Namespace.Name, job.Name)
+            curr, err := getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
             if err != nil {
                 return false, err
             }
@@ -120,11 +120,11 @@ var _ = framework.KubeDescribe("V1Job", func() {
         endParallelism := int32(2)
         By("Creating a job")
         job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring active pods == startParallelism")
-        err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
+        err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
         Expect(err).NotTo(HaveOccurred())
         By("scale job up")
@@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring active pods == endParallelism")
-        err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
+        err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -145,11 +145,11 @@ var _ = framework.KubeDescribe("V1Job", func() {
         endParallelism := int32(1)
         By("Creating a job")
         job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring active pods == startParallelism")
-        err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
+        err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
         Expect(err).NotTo(HaveOccurred())
         By("scale job down")
@@ -161,18 +161,18 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring active pods == endParallelism")
-        err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
+        err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
         Expect(err).NotTo(HaveOccurred())
     })
     It("should delete a job", func() {
         By("Creating a job")
         job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring active pods == parallelism")
-        err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, parallelism)
+        err = waitForAllPodsRunningV1(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
         Expect(err).NotTo(HaveOccurred())
         By("delete a job")
@@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job was deleted")
-        _, err = getV1Job(f.Client, f.Namespace.Name, job.Name)
+        _, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
         Expect(err).To(HaveOccurred())
        Expect(errors.IsNotFound(err)).To(BeTrue())
     })
@@ -193,21 +193,21 @@ var _ = framework.KubeDescribe("V1Job", func() {
         job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
         activeDeadlineSeconds := int64(10)
         job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
-        job, err := createV1Job(f.Client, f.Namespace.Name, job)
+        job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring job was failed")
-        err = waitForV1JobFail(f.Client, f.Namespace.Name, job.Name, 20*time.Second)
+        err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second)
         if err == wait.ErrWaitTimeout {
-            job, err = getV1Job(f.Client, f.Namespace.Name, job.Name)
+            job, err = getV1Job(f.ClientSet, f.Namespace.Name, job.Name)
             Expect(err).NotTo(HaveOccurred())
             // the job stabilized and won't be synced until modification or full
             // resync happens, we don't want to wait for the latter so we force
             // sync modifying it
             job.Spec.Parallelism = &completions
-            job, err = updateV1Job(f.Client, f.Namespace.Name, job)
+            job, err = updateV1Job(f.ClientSet, f.Namespace.Name, job)
             Expect(err).NotTo(HaveOccurred())
-            err = waitForV1JobFail(f.Client, f.Namespace.Name, job.Name, v1JobTimeout)
+            err = waitForV1JobFail(f.ClientSet, f.Namespace.Name, job.Name, v1JobTimeout)
         }
         Expect(err).NotTo(HaveOccurred())
     })
@@ -275,28 +275,28 @@ func newTestV1Job(behavior, name string, rPol api.RestartPolicy, parallelism, co
     return job
 }
-func getV1Job(c *client.Client, ns, name string) (*batch.Job, error) {
+func getV1Job(c clientset.Interface, ns, name string) (*batch.Job, error) {
     return c.Batch().Jobs(ns).Get(name)
 }
-func createV1Job(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) {
+func createV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
     return c.Batch().Jobs(ns).Create(job)
 }
-func updateV1Job(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) {
+func updateV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
     return c.Batch().Jobs(ns).Update(job)
 }
-func deleteV1Job(c *client.Client, ns, name string) error {
+func deleteV1Job(c clientset.Interface, ns, name string) error {
     return c.Batch().Jobs(ns).Delete(name, api.NewDeleteOptions(0))
 }
 // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
-func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism int32) error {
+func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallelism int32) error {
     label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName}))
     return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
         options := api.ListOptions{LabelSelector: label}
-        pods, err := c.Pods(ns).List(options)
+        pods, err := c.Core().Pods(ns).List(options)
         if err != nil {
             return false, err
         }
@@ -311,7 +311,7 @@ func waitForAllPodsRunningV1(c *client.Client, ns, jobName string, parallelism i
 }
 // Wait for job to reach completions.
-func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32) error {
+func waitForV1JobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
     return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
         curr, err := c.Batch().Jobs(ns).Get(jobName)
         if err != nil {
@@ -322,7 +322,7 @@ func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32)
 }
 // Wait for job fail.
-func waitForV1JobFail(c *client.Client, ns, jobName string, timeout time.Duration) error {
+func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
     return wait.Poll(framework.Poll, timeout, func() (bool, error) {
         curr, err := c.Batch().Jobs(ns).Get(jobName)
         if err != nil {


@@ -21,7 +21,7 @@ import (
     "time"
     "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
     . "github.com/onsi/ginkgo"
@@ -32,14 +32,14 @@ var _ = framework.KubeDescribe("Cadvisor", func() {
     f := framework.NewDefaultFramework("cadvisor")
     It("should be healthy on every node.", func() {
-        CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)
+        CheckCadvisorHealthOnAllNodes(f.ClientSet, 5*time.Minute)
     })
 })
-func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
+func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
     // It should be OK to list unschedulable Nodes here.
     By("getting list of nodes")
-    nodeList, err := c.Nodes().List(api.ListOptions{})
+    nodeList, err := c.Core().Nodes().List(api.ListOptions{})
     framework.ExpectNoError(err)
     var errors []error
@@ -69,7 +69,7 @@ func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
         // Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
         statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
         By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
-        _, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
+        _, err = c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
         if err != nil {
             errors = append(errors, err)
         }


@@ -53,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
         By("Running synthetic logger")
         createSynthLogger(f, expectedLinesCount)
         defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
-        err = framework.WaitForPodSuccessInNamespace(f.Client, synthLoggerPodName, f.Namespace.Name)
+        err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
         framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
         By("Waiting for logs to ingest")
@@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 func checkElasticsearchReadiness(f *framework.Framework) error {
     // Check for the existence of the Elasticsearch service.
     By("Checking the Elasticsearch service exists.")
-    s := f.Client.Services(api.NamespaceSystem)
+    s := f.ClientSet.Core().Services(api.NamespaceSystem)
     // Make a few attempts to connect. This makes the test robust against
     // being run as the first e2e test just after the e2e cluster has been created.
     var err error
@@ -102,10 +102,10 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
     By("Checking to make sure the Elasticsearch pods are running")
     label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
     options := api.ListOptions{LabelSelector: label}
-    pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
+    pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
     Expect(err).NotTo(HaveOccurred())
     for _, pod := range pods.Items {
-        err = framework.WaitForPodRunningInNamespace(f.Client, &pod)
+        err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
         Expect(err).NotTo(HaveOccurred())
     }
@@ -115,7 +115,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
     err = nil
     var body []byte
     for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
-        proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
+        proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
         if errProxy != nil {
             framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
             continue
@@ -147,7 +147,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
     By("Checking health of Elasticsearch service.")
     healthy := false
     for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
-        proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
+        proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
         if errProxy != nil {
             framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
             continue
@@ -189,7 +189,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 }
 func getMissingLinesCountElasticsearch(f *framework.Framework, expectedCount int) (int, error) {
-    proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
+    proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
     if errProxy != nil {
         return 0, fmt.Errorf("Failed to get services proxy request: %v", errProxy)
     }


@@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Flaky]", func()
         By("Running synthetic logger")
         createSynthLogger(f, expectedLinesCount)
         defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
-        err := framework.WaitForPodSuccessInNamespace(f.Client, synthLoggerPodName, f.Namespace.Name)
+        err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
         framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
         By("Waiting for logs to ingest")


@@ -73,12 +73,12 @@ func reportLogsFromFluentdPod(f *framework.Framework) error {
     label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
     options := api.ListOptions{LabelSelector: label}
-    fluentdPods, err := f.Client.Pods(api.NamespaceSystem).List(options)
+    fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
     for _, fluentdPod := range fluentdPods.Items {
         if fluentdPod.Spec.NodeName == synthLoggerNodeName {
             containerName := fluentdPod.Spec.Containers[0].Name
-            logs, err := framework.GetPodLogs(f.Client, api.NamespaceSystem, fluentdPod.Name, containerName)
+            logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
             if err != nil {
                 return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
             }


@@ -27,7 +27,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/util/sets"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -50,14 +50,14 @@ const (
 var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 f := framework.NewDefaultFramework("autoscaling")
-var c *client.Client
+var c clientset.Interface
 var nodeCount int
 var coresPerNode int
 var memCapacityMb int
 var originalSizes map[string]int
 BeforeEach(func() {
-c = f.Client
+c = f.ClientSet
 framework.SkipUnlessProviderIs("gce", "gke")
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 By("Creating unschedulable pod")
 ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
-defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
+defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
 By("Waiting for scale up hoping it won't happen")
 // Verify that the appropriate event was generated.
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 EventsLoop:
 for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
 By("Waiting for NotTriggerScaleUp event")
-events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{})
+events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(api.ListOptions{})
 framework.ExpectNoError(err)
 for _, e := range events.Items {
@@ -119,16 +119,16 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 }
 Expect(eventFound).Should(Equal(true))
 // Verify that cluster size is not changed.
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size <= nodeCount }, time.Second))
 })
 It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
+defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
 // Verify that cluster size is increased
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 })
@@ -144,10 +144,10 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
+defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
 // Verify that cluster size is increased
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 })
@@ -166,9 +166,9 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 CreateHostPortPods(f, "host-port", nodeCount+2, false)
-defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port")
+defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "host-port")
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 })
@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 By("Waiting for new node to appear and annotating it")
 WaitForGroupSize(minMig, int32(minSize+1))
 // Verify that cluster size is increased
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 newNodes, err := GetGroupNodes(minMig)
@@ -214,11 +214,11 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 By(fmt.Sprintf("Setting labels for new nodes: %v", newNodesSet.List()))
 updateNodeLabels(c, newNodesSet, labels, nil)
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "node-selector"))
+framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "node-selector"))
 })
 It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -233,7 +233,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
 ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
-defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
+defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
 // Apparently the GKE master is restarted a couple of minutes after the node pool is added,
 // resetting all the timers in the scale down code. Adding 5 extra minutes to work around
@@ -251,11 +251,11 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 increasedSize += val + 2
 }
 setMigSizes(newSizes)
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= increasedSize }, scaleUpTimeout))
 By("Some node should be removed")
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size < increasedSize }, scaleDownTimeout))
 })
@@ -270,14 +270,14 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 increasedSize += val + 2
 }
 setMigSizes(newSizes)
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= increasedSize }, scaleUpTimeout))
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-1", 3)
 defer deleteNodePool(extraPoolName)
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout))
 By("Some node should be removed")
@@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 // resetting all the timers in the scale down code. Adding 10 extra minutes to work around
 // this issue.
 // TODO: Remove the extra time when GKE restart is fixed.
-framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute))
 })
 })
@@ -458,11 +458,11 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
 config := &testutils.RCConfig{
-Client: f.Client,
+Client: f.ClientSet,
 Name: "node-selector",
 Namespace: f.Namespace.Name,
 Timeout: defaultTimeout,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Replicas: replicas,
 HostPorts: map[string]int{"port1": 4321},
 NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"},
@@ -476,11 +476,11 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
 By(fmt.Sprintf("Running RC which reserves host port"))
 config := &testutils.RCConfig{
-Client: f.Client,
+Client: f.ClientSet,
 Name: id,
 Namespace: f.Namespace.Name,
 Timeout: defaultTimeout,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Replicas: replicas,
 HostPorts: map[string]int{"port1": 4321},
 }
@@ -494,11 +494,11 @@ func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
 By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
 request := int64(millicores / replicas)
 config := &testutils.RCConfig{
-Client: f.Client,
+Client: f.ClientSet,
 Name: id,
 Namespace: f.Namespace.Name,
 Timeout: defaultTimeout,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Replicas: replicas,
 CpuRequest: request,
 }
@@ -509,11 +509,11 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
 request := int64(1024 * 1024 * megabytes / replicas)
 config := &testutils.RCConfig{
-Client: f.Client,
+Client: f.ClientSet,
 Name: id,
 Namespace: f.Namespace.Name,
 Timeout: defaultTimeout,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Replicas: replicas,
 MemRequest: request,
 }
@@ -524,9 +524,9 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 }
 // WaitForClusterSize waits until the cluster size matches the given function.
-func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout time.Duration) error {
+func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
 "spec.unschedulable": "false",
 }.AsSelector()})
 if err != nil {
@@ -550,10 +550,10 @@ func WaitForClusterSizeFunc(c *client.Client, sizeFunc func(int) bool, timeout t
 return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
 }
-func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c *client.Client) error {
+func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
 var notready []string
 for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
-pods, err := c.Pods(f.Namespace.Name).List(api.ListOptions{})
+pods, err := c.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
 if err != nil {
 return fmt.Errorf("failed to get pods: %v", err)
 }
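
Editor's note: for reference, a compilable sketch of the helper shape used throughout this file, assuming only the imports shown in the hunks above; the function name countSchedulableNodes is hypothetical:

// countSchedulableNodes mirrors the node listing inside
// WaitForClusterSizeFunc: nodes are now listed via Core().Nodes() on the
// generated clientset, with the same spec.unschedulable field selector.
func countSchedulableNodes(c clientset.Interface) (int, error) {
	nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
		"spec.unschedulable": "false",
	}.AsSelector()})
	if err != nil {
		return 0, err
	}
	return len(nodes.Items), nil
}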


@@ -23,7 +23,6 @@ import (
 "k8s.io/kubernetes/pkg/api"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/chaosmonkey"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -44,7 +43,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.MasterUpgrade(v))
-framework.ExpectNoError(checkMasterVersion(f.Client, v))
+framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -90,7 +89,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.MasterUpgrade(v))
-framework.ExpectNoError(checkMasterVersion(f.Client, v))
+framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
 framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
@@ -106,7 +105,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.MasterUpgrade(v))
-framework.ExpectNoError(checkMasterVersion(f.Client, v))
+framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
 framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
@@ -147,7 +146,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
 // Setup
 serviceName := "service-test"
-jig := NewServiceTestJig(f.Client, f.ClientSet, serviceName)
+jig := NewServiceTestJig(f.ClientSet, serviceName)
 // nodeIP := pickNodeIP(jig.Client) // for later
 By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
@@ -192,7 +191,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
 jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
 }
-func checkMasterVersion(c *client.Client, want string) error {
+func checkMasterVersion(c clientset.Interface, want string) error {
 framework.Logf("Checking master version")
 v, err := c.Discovery().ServerVersion()
 if err != nil {
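
Editor's note: the version check itself survives the migration unchanged, since Discovery() exists on the generated clientset as well. A sketch of the post-migration shape, assuming version.Info exposes a GitVersion field as in the upstream client of this era; the helper name masterVersion is hypothetical:

// masterVersion shows the migrated checkMasterVersion shape: only the
// parameter type changes, the discovery call is identical.
func masterVersion(c clientset.Interface) (string, error) {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		return "", err
	}
	return v.GitVersion, nil
}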


@@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
-if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
+if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
@@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 f.PodClient().CreateSync(pod)
 pollLogs := func() (string, error) {
-return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
+return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
 }
 Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
@@ -141,7 +141,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 By(fmt.Sprintf("Updating configmap %v", configMap.Name))
 configMap.ResourceVersion = "" // to force update
 configMap.Data["data-1"] = "value-2"
-_, err = f.Client.ConfigMaps(f.Namespace.Name).Update(configMap)
+_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap)
 Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
 By("waiting to observe update in volume")
@@ -153,7 +153,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 configMap := newConfigMap(f, name)
 By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
 var err error
-if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
+if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
-if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
+if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
@@ -288,7 +288,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
 By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
-if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
+if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
@@ -367,7 +367,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
 By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
-if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
+if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }


@@ -331,7 +331,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
 // Wait until the pod is not pending. (Here we need to check for something other than
 // 'Pending' other than checking for 'Running', since when failures occur, we go to
 // 'Terminated' which can cause indefinite blocking.)
-framework.ExpectNoError(framework.WaitForPodNotPending(f.Client, ns, pod.Name, pod.ResourceVersion),
+framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
 fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
 framework.Logf("Started pod %s in namespace %s", pod.Name, ns)


@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 podClient.CreateSync(pod)
 Eventually(func() (string, error) {
-return framework.GetPodLogs(f.Client, f.Namespace.Name, podName, containerName)
+return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
 },
 podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
@@ -103,7 +103,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 })
 Eventually(func() (string, error) {
-return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
+return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
 },
 podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
 })
@@ -122,7 +122,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
 Eventually(func() (string, error) {
-return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
+return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
 },
 podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 })
 Eventually(func() (string, error) {
-return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
+return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
 },
 podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
 })


@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 Containers: []api.Container{
 {
 Name: "run1",
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Resources: api.ResourceRequirements{
 Limits: api.ResourceList{
 api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
 Containers: []api.Container{
 {
 Name: "run1",
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Resources: api.ResourceRequirements{
 Limits: api.ResourceList{
 api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),


@@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 Containers: []api.Container{
 {
 Name: "test",
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 },
 },
 },
@@ -211,7 +211,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 By("verifying the kubelet observed the termination notice")
 Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
-podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName)
+podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 if err != nil {
 framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
 return false, nil
@@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 },
 },
 }
-_, err := f.Client.Services(f.Namespace.Name).Create(svc)
+_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc)
 Expect(err).NotTo(HaveOccurred(), "failed to create service")
 // Make a client pod that verifies that it has the service environment variables.
@@ -460,7 +460,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 By("submitting the pod to kubernetes")
 pod = podClient.CreateSync(pod)
-req := f.Client.Get().
+req := f.ClientSet.Core().RESTClient().Get().
 Namespace(f.Namespace.Name).
 Resource("pods").
 Name(pod.Name).
@@ -530,7 +530,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 By("submitting the pod to kubernetes")
 podClient.CreateSync(pod)
-req := f.Client.Get().
+req := f.ClientSet.Core().RESTClient().Get().
 Namespace(f.Namespace.Name).
 Resource("pods").
 Name(pod.Name).
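
Editor's note: raw REST access changes shape too: the generic request builder no longer lives on the client itself but behind Core().RESTClient(). A sketch of the request built above, assuming pod and f from the surrounding test:

// Builds a GET request for the pod resource; the only migration change
// is the f.ClientSet.Core().RESTClient() prefix in place of f.Client.
req := f.ClientSet.Core().RESTClient().Get().
	Namespace(f.Namespace.Name).
	Resource("pods").
	Name(pod.Name)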


@@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
 By(fmt.Sprintf("Creating secret with name %s", secret.Name))
 var err error
-if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
+if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
 framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 }
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
 By(fmt.Sprintf("Creating secret with name %s", secret.Name))
 var err error
-if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
+if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
 framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 }
@@ -190,7 +190,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32) {
 By(fmt.Sprintf("Creating secret with name %s", secret.Name))
 var err error
-if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
+if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
 framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 }
@@ -254,7 +254,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
 By(fmt.Sprintf("Creating secret with name %s", secret.Name))
 var err error
-if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
+if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
 framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 }


@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 waitForPodErrorEventOrStarted := func(pod *api.Pod) (*api.Event, error) {
 var ev *api.Event
 err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
-evnts, err := f.Client.Events(pod.Namespace).Search(pod)
+evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod)
 if err != nil {
 return false, fmt.Errorf("error in listing events: %s", err)
 }
@@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 Expect(pod.Status.Phase).To(Equal(api.PodSucceeded))
 By("Getting logs from the pod")
-log, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
 Expect(err).NotTo(HaveOccurred())
 By("Checking that the sysctl is actually updated")
@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 Expect(pod.Status.Phase).To(Equal(api.PodSucceeded))
 By("Getting logs from the pod")
-log, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
 Expect(err).NotTo(HaveOccurred())
 By("Checking that the sysctl is actually updated")
@@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
 })
 By("Creating a pod with one valid and two invalid sysctls")
-client := f.Client.Pods(f.Namespace.Name)
+client := f.ClientSet.Core().Pods(f.Namespace.Name)
 _, err := client.Create(pod)
 Expect(err).NotTo(BeNil())


@@ -23,7 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/cache"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/pkg/runtime"
@@ -169,9 +169,9 @@ func replacePods(pods []*api.Pod, store cache.Store) {
 // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
 // and a list of nodenames across which these containers restarted.
-func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) {
+func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
 options := api.ListOptions{LabelSelector: labelSelector}
-pods, err := c.Pods(ns).List(options)
+pods, err := c.Core().Pods(ns).List(options)
 framework.ExpectNoError(err)
 failedContainers := 0
 containerRestartNodes := sets.NewString()
@@ -205,10 +205,10 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 // All the restart tests need an rc and a watch on pods of the rc.
 // Additionally some of them might scale the rc during the test.
 config = testutils.RCConfig{
-Client: f.Client,
+Client: f.ClientSet,
 Name: rcName,
 Namespace: ns,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Replicas: numPods,
 CreatedPods: &[]*api.Pod{},
 }
@@ -221,11 +221,12 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 options.LabelSelector = labelSelector
-return f.Client.Pods(ns).List(options)
+obj, err := f.ClientSet.Core().Pods(ns).List(options)
+return runtime.Object(obj), err
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
 options.LabelSelector = labelSelector
-return f.Client.Pods(ns).Watch(options)
+return f.ClientSet.Core().Pods(ns).Watch(options)
 },
 },
 &api.Pod{},
@@ -262,7 +263,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
 // to the same size achieves this, because the scale operation advances the RC's sequence number
 // and awaits it to be observed and reported back in the RC's status.
-framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods, true)
+framework.ScaleRC(f.ClientSet, ns, rcName, numPods, true)
 // Only check the keys, the pods can be different if the kubelet updated it.
 // TODO: Can it really?
@@ -293,16 +294,16 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 restarter.kill()
 // This is best effort to try and create pods while the scheduler is down,
 // since we don't know exactly when it is restarted after the kill signal.
-framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, false))
+framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, false))
 restarter.waitUp()
-framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, true))
+framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, true))
 })
 It("Kubelet should not restart containers across restart", func() {
 nodeIPs, err := getNodePublicIps(f.ClientSet)
 framework.ExpectNoError(err)
-preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
+preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if preRestarts != 0 {
 framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
 }
@@ -311,9 +312,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
 restarter.restart()
 }
-postRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
+postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if postRestarts != preRestarts {
-framework.DumpNodeDebugInfo(f.Client, badNodes, framework.Logf)
+framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
 framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 }
 })
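
Editor's note: one subtlety in this file is the informer's ListFunc, which must return a runtime.Object; the commit converts the typed *api.PodList from the generated clientset explicitly before returning it. A sketch of the migrated source, assuming labelSelector, ns, and f from the enclosing test:

// cache.ListWatch source for the RC's pods after the migration; the typed
// List result is handed back through the runtime.Object interface, while
// Watch already returns the watch.Interface the field expects.
source := &cache.ListWatch{
	ListFunc: func(options api.ListOptions) (runtime.Object, error) {
		options.LabelSelector = labelSelector
		obj, err := f.ClientSet.Core().Pods(ns).List(options)
		return runtime.Object(obj), err
	},
	WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
		options.LabelSelector = labelSelector
		return f.ClientSet.Core().Pods(ns).Watch(options)
	},
}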


@@ -28,7 +28,6 @@ import (
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/apis/extensions"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/runtime"
@@ -59,17 +58,17 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 var f *framework.Framework
 AfterEach(func() {
-if daemonsets, err := f.Client.DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
+if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
 framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
 } else {
 framework.Logf("unable to dump daemonsets: %v", err)
 }
-if pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
+if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
 framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
 } else {
 framework.Logf("unable to dump pods: %v", err)
 }
-err := clearDaemonSetNodeLabels(f.Client, f.ClientSet)
+err := clearDaemonSetNodeLabels(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 })
@@ -79,12 +78,13 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 dsName := "daemon-set"
 var ns string
-var c *client.Client
+var c clientset.Interface
 BeforeEach(func() {
 ns = f.Namespace.Name
-c = f.Client
-err := clearDaemonSetNodeLabels(c, f.ClientSet)
+c = f.ClientSet
+err := clearDaemonSetNodeLabels(c)
 Expect(err).NotTo(HaveOccurred())
 })
@@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 label := map[string]string{daemonsetNameLabel: dsName}
 framework.Logf("Creating simple daemon set %s", dsName)
-_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
+_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
 ObjectMeta: api.ObjectMeta{
 Name: dsName,
 },
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred())
 By("Stop a daemon pod, check that the daemon pod is revived.")
-podClient := c.Pods(ns)
+podClient := c.Core().Pods(ns)
 selector := labels.Set(label).AsSelector()
 options := api.ListOptions{LabelSelector: selector}
@@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 complexLabel := map[string]string{daemonsetNameLabel: dsName}
 nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
 framework.Logf("Creating daemon with a node selector %s", dsName)
-_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
+_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
 ObjectMeta: api.ObjectMeta{
 Name: dsName,
 },
@@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
 By("We should now be able to delete the daemon set.")
-Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
+Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
 })
@@ -219,7 +219,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 }]
 }}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
 }
-_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
+_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
 ObjectMeta: api.ObjectMeta{
 Name: dsName,
 },
@@ -267,7 +267,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
 By("We should now be able to delete the daemon set.")
-Expect(c.DaemonSets(ns).Delete(dsName)).NotTo(HaveOccurred())
+Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
 })
 })
@@ -285,8 +285,8 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 return daemonSetLabels, otherLabels
 }
-func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
-nodeList := framework.GetReadySchedulableNodesOrDie(cs)
+func clearDaemonSetNodeLabels(c clientset.Interface) error {
+nodeList := framework.GetReadySchedulableNodesOrDie(c)
 for _, node := range nodeList.Items {
 _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 if err != nil {
@@ -296,8 +296,8 @@ func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
 return nil
 }
-func setDaemonSetNodeLabels(c *client.Client, nodeName string, labels map[string]string) (*api.Node, error) {
-nodeClient := c.Nodes()
+func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*api.Node, error) {
+nodeClient := c.Core().Nodes()
 var newNode *api.Node
 var newLabels map[string]string
 err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
@@ -340,7 +340,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n
 return func() (bool, error) {
 selector := labels.Set(selector).AsSelector()
 options := api.ListOptions{LabelSelector: selector}
-podList, err := f.Client.Pods(f.Namespace.Name).List(options)
+podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options)
 if err != nil {
 return false, nil
 }
@@ -368,7 +368,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n
 func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
 return func() (bool, error) {
-nodeList, err := f.Client.Nodes().List(api.ListOptions{})
+nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
 framework.ExpectNoError(err)
 nodeNames := make([]string, 0)
 for _, node := range nodeList.Items {
@@ -383,7 +383,7 @@ func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) f
 }
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
-ds, err := f.Client.DaemonSets(f.Namespace.Name).Get(dsName)
+ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName)
 if err != nil {
 return fmt.Errorf("Could not get daemon set from api.")
 }
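
Editor's note: non-core API groups follow the same pattern through their own accessors, and the generated Delete takes explicit options. A minimal sketch; the helper name deleteDaemonSet is hypothetical, the calls match the hunks above:

// deleteDaemonSet shows the migrated extensions-group call: DaemonSets
// hang off Extensions(), and Delete now takes *api.DeleteOptions, where
// nil selects the defaults.
func deleteDaemonSet(c clientset.Interface, ns, name string) error {
	return c.Extensions().DaemonSets(ns).Delete(name, nil)
}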


@@ -43,18 +43,18 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
 It("should check that the kubernetes-dashboard instance is alive", func() {
 By("Checking whether the kubernetes-dashboard service exists.")
-err := framework.WaitForService(f.Client, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
+err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
 Expect(err).NotTo(HaveOccurred())
 By("Checking to make sure the kubernetes-dashboard pods are running")
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
-err = testutils.WaitForPodsWithLabelRunning(f.Client, uiNamespace, selector)
+err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector)
 Expect(err).NotTo(HaveOccurred())
 By("Checking to make sure we get a response from the kubernetes-dashboard.")
 err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
 var status int
-proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
+proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
 if errProxy != nil {
 framework.Logf("Get services proxy request failed: %v", errProxy)
 }
@@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
 By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
 var status int
-err = f.Client.Get().
+err = f.ClientSet.Core().RESTClient().Get().
 AbsPath("/ui").
 Timeout(framework.SingleCallTimeout).
 Do().


@@ -30,7 +30,6 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/runtime"
@@ -55,7 +54,6 @@ var MaxContainerFailures = 0
 type DensityTestConfig struct {
 Configs []testutils.RCConfig
-Client *client.Client
 ClientSet internalclientset.Interface
 Namespace string
 PollInterval time.Duration
@@ -162,7 +160,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
 return constraints
 }
-func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
+func logPodStartupStatus(c internalclientset.Interface, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
 label := labels.SelectorFromSet(labels.Set(observedLabels))
 podStore := testutils.NewPodStore(c, ns, label, fields.Everything())
 defer podStore.Stop()
@@ -194,10 +192,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 _, controller := cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return dtc.Client.Events(dtc.Namespace).List(options)
+return dtc.ClientSet.Core().Events(dtc.Namespace).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return dtc.Client.Events(dtc.Namespace).Watch(options)
+return dtc.ClientSet.Core().Events(dtc.Namespace).Watch(options)
 },
 },
 &api.Event{},
@@ -222,11 +220,11 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 options.LabelSelector = label
-return dtc.Client.Pods(dtc.Namespace).List(options)
+return dtc.ClientSet.Core().Pods(dtc.Namespace).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
 options.LabelSelector = label
-return dtc.Client.Pods(dtc.Namespace).Watch(options)
+return dtc.ClientSet.Core().Pods(dtc.Namespace).Watch(options)
 },
 },
 &api.Pod{},
@@ -254,7 +252,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 }()
 }
 logStopCh := make(chan struct{})
-go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
+go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
 wg.Wait()
 startupTime := time.Now().Sub(startTime)
 close(logStopCh)
@@ -296,7 +294,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
 // Print some data about Pod to Node allocation
 By("Printing Pod to Node allocation data")
-podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
+podList, err := dtc.ClientSet.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
 framework.ExpectNoError(err)
 pausePodAllocation := make(map[string]int)
 systemPodAllocation := make(map[string][]string)
@@ -324,15 +322,15 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 // We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
 for i := range dtc.Configs {
 rcName := dtc.Configs[i].Name
-rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
+rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Namespace).Get(rcName)
 if err == nil && rc.Spec.Replicas != 0 {
 if framework.TestContext.GarbageCollectorEnabled {
 By("Cleaning up only the replication controller, garbage collector will clean up the pods")
-err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName)
+err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Namespace, rcName)
 framework.ExpectNoError(err)
 } else {
 By("Cleaning up the replication controller and pods")
-err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
+err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.Namespace, rcName)
 framework.ExpectNoError(err)
 }
 }
@@ -347,7 +345,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 // results will not be representative for control-plane performance as we'll start hitting
 // limits on Docker's concurrent container startup.
 var _ = framework.KubeDescribe("Density", func() {
-var c *client.Client
+var c internalclientset.Interface
 var nodeCount int
 var RCName string
 var additionalPodsPrefix string
@@ -392,7 +390,7 @@ var _ = framework.KubeDescribe("Density", func() {
 f.NamespaceDeletionTimeout = time.Hour
 BeforeEach(func() {
-c = f.Client
+c = f.ClientSet
 ns = f.Namespace.Name
 // In large clusters we may get to this point but still have a bunch
@@ -477,7 +475,7 @@ var _ = framework.KubeDescribe("Density", func() {
 for i := 0; i < numberOrRCs; i++ {
 RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
 RCConfigs[i] = testutils.RCConfig{Client: c,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
 Name: RCName,
 Namespace: ns,
 Labels: map[string]string{"type": "densityPod"},
@@ -492,7 +490,6 @@ var _ = framework.KubeDescribe("Density", func() {
 }
 dConfig := DensityTestConfig{
-Client: c,
 ClientSet: f.ClientSet,
 Configs: RCConfigs,
 PodCount: totalPods,
@@ -543,11 +540,12 @@ var _ = framework.KubeDescribe("Density", func() {
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).List(options) obj, err := c.Core().Pods(ns).List(options)
return runtime.Object(obj), err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).Watch(options) return c.Core().Pods(ns).Watch(options)
}, },
}, },
&api.Pod{}, &api.Pod{},
@ -586,7 +584,7 @@ var _ = framework.KubeDescribe("Density", func() {
} }
for i := 1; i <= nodeCount; i++ { for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i) name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest) go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond) time.Sleep(200 * time.Millisecond)
} }
wg.Wait() wg.Wait()
@ -616,7 +614,7 @@ var _ = framework.KubeDescribe("Density", func() {
"source": api.DefaultSchedulerName, "source": api.DefaultSchedulerName,
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options) schedEvents, err := c.Core().Events(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for k := range createTimes { for k := range createTimes {
for _, event := range schedEvents.Items { for _, event := range schedEvents.Items {
@ -700,7 +698,7 @@ var _ = framework.KubeDescribe("Density", func() {
} }
RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = testutils.RCConfig{Client: c, RCConfigs[i] = testutils.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
Name: RCName, Name: RCName,
Namespace: ns, Namespace: ns,
Labels: map[string]string{"type": "densityPod"}, Labels: map[string]string{"type": "densityPod"},
@ -712,7 +710,6 @@ var _ = framework.KubeDescribe("Density", func() {
} }
} }
dConfig := DensityTestConfig{ dConfig := DensityTestConfig{
Client: c,
ClientSet: f.ClientSet, ClientSet: f.ClientSet,
Configs: RCConfigs, Configs: RCConfigs,
PodCount: totalPods, PodCount: totalPods,
@ -725,7 +722,7 @@ var _ = framework.KubeDescribe("Density", func() {
}) })
}) })
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) { func createRunningPodFromRC(wg *sync.WaitGroup, c internalclientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover() defer GinkgoRecover()
defer wg.Done() defer wg.Done()
labels := map[string]string{ labels := map[string]string{
@ -762,7 +759,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, imag
}, },
}, },
} }
_, err := c.ReplicationControllers(ns).Create(rc) _, err := c.Core().ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name)) framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
framework.Logf("Found pod '%s' running", name) framework.Logf("Found pod '%s' running", name)

View File

@@ -320,9 +320,6 @@ func testDeleteDeployment(f *framework.Framework) {
func testRollingUpdateDeployment(f *framework.Framework) {
    ns := f.Namespace.Name
-   // TODO: remove unversionedClient when the refactoring is done. Currently some
-   // functions like verifyPod still expects a unversioned#Client.
-   unversionedClient := f.Client
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "sample-pod"}
@@ -336,7 +333,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
    _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, "sample-pod", false, 3)
+   err = framework.VerifyPods(c, ns, "sample-pod", false, 3)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -369,9 +366,6 @@ func testRollingUpdateDeployment(f *framework.Framework) {
func testRollingUpdateDeploymentEvents(f *framework.Framework) {
    ns := f.Namespace.Name
-   // TODO: remove unversionedClient when the refactoring is done. Currently some
-   // functions like verifyPod still expects a unversioned#Client.
-   unversionedClient := f.Client
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
@@ -391,7 +385,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
    _, err := c.Extensions().ReplicaSets(ns).Create(rs)
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
+   err = framework.VerifyPods(c, ns, "sample-pod-2", false, 1)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -412,7 +406,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
    // Verify that the pods were scaled up and down as expected. We use events to verify that.
    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
    Expect(err).NotTo(HaveOccurred())
-   framework.WaitForEvents(unversionedClient, ns, deployment, 2)
+   framework.WaitForEvents(c, ns, deployment, 2)
    events, err := c.Core().Events(ns).Search(deployment)
    if err != nil {
        framework.Logf("error in listing events: %s", err)
@@ -430,9 +424,6 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
func testRecreateDeployment(f *framework.Framework) {
    ns := f.Namespace.Name
-   // TODO: remove unversionedClient when the refactoring is done. Currently some
-   // functions like verifyPod still expects a unversioned#Client.
-   unversionedClient := f.Client
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
@@ -446,7 +437,7 @@ func testRecreateDeployment(f *framework.Framework) {
    _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
+   err = framework.VerifyPods(c, ns, "sample-pod-3", false, 3)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -468,7 +459,7 @@ func testRecreateDeployment(f *framework.Framework) {
    // Verify that the pods were scaled up and down as expected. We use events to verify that.
    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
    Expect(err).NotTo(HaveOccurred())
-   framework.WaitForEvents(unversionedClient, ns, deployment, 2)
+   framework.WaitForEvents(c, ns, deployment, 2)
    events, err := c.Core().Events(ns).Search(deployment)
    if err != nil {
        framework.Logf("error in listing events: %s", err)
@@ -486,7 +477,6 @@ func testRecreateDeployment(f *framework.Framework) {
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *framework.Framework) {
    ns := f.Namespace.Name
-   unversionedClient := f.Client
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
@@ -501,7 +491,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
+   err = framework.VerifyPods(c, ns, "cleanup-pod", false, 1)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -558,9 +548,6 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *framework.Framework) {
    ns := f.Namespace.Name
-   // TODO: remove unversionedClient when the refactoring is done. Currently some
-   // functions like verifyPod still expects a unversioned#Client.
-   unversionedClient := f.Client
    c := f.ClientSet
    podName := "rollover-pod"
    deploymentPodLabels := map[string]string{"name": podName}
@@ -574,7 +561,7 @@ func testRolloverDeployment(f *framework.Framework) {
    _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, nginxImageName, nginxImage))
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, podName, false, rsReplicas)
+   err = framework.VerifyPods(c, ns, podName, false, rsReplicas)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -962,9 +949,6 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
func testDeploymentLabelAdopted(f *framework.Framework) {
    ns := f.Namespace.Name
-   // TODO: remove unversionedClient when the refactoring is done. Currently some
-   // functions like verifyPod still expects a unversioned#Client.
-   unversionedClient := f.Client
    c := f.ClientSet
    // Create nginx pods.
    podName := "nginx"
@@ -976,7 +960,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
    _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, image))
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
-   err = framework.VerifyPods(unversionedClient, ns, podName, false, 3)
+   err = framework.VerifyPods(c, ns, podName, false, 3)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
@@ -1097,7 +1081,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
    // Verify that the required pods have come up.
    By("Waiting for all required pods to come up")
-   err = framework.VerifyPods(f.Client, ns, nginxImageName, false, deployment.Spec.Replicas)
+   err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, deployment.Spec.Replicas)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
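The unversionedClient shim deleted throughout this file existed only because framework.VerifyPods used to take the old client type. A sketch of the simplified flow under the post-migration signature shown above; createAndVerify, the ReplicaSet literal, and its label/image values are illustrative:

package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// createAndVerify (hypothetical helper) creates a ReplicaSet through the typed
// Extensions() client, then hands the same clientset to VerifyPods — no second
// client handle needed.
func createAndVerify(c clientset.Interface, ns string) error {
	podLabels := map[string]string{"name": "sample-pod"}
	rs := &extensions.ReplicaSet{
		ObjectMeta: api.ObjectMeta{Name: "sample-rs", Namespace: ns},
		Spec: extensions.ReplicaSetSpec{
			Replicas: 3,
			Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: podLabels},
				Spec: api.PodSpec{
					Containers: []api.Container{{Name: "nginx", Image: "nginx"}},
				},
			},
		},
	}
	if _, err := c.Extensions().ReplicaSets(ns).Create(rs); err != nil {
		return err
	}
	return framework.VerifyPods(c, ns, "sample-pod", false, 3)
}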

View File

@@ -27,7 +27,7 @@ import (
    "k8s.io/kubernetes/pkg/api/pod"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/apimachinery/registered"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/uuid"
    "k8s.io/kubernetes/pkg/util/wait"
@@ -171,23 +171,23 @@ func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePre
    return probeCmd, fileName
}
-func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
+func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface) {
    assertFilesContain(fileNames, fileDir, pod, client, false, "")
}
-func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client *client.Client, check bool, expected string) {
+func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface, check bool, expected string) {
    var failed []string
    framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
        failed = []string{}
-       subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client)
+       subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client.Discovery())
        if err != nil {
            return false, err
        }
        var contents []byte
        for _, fileName := range fileNames {
            if subResourceProxyAvailable {
-               contents, err = client.Get().
+               contents, err = client.Core().RESTClient().Get().
                    Namespace(pod.Namespace).
                    Resource("pods").
                    SubResource("proxy").
@@ -195,7 +195,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client
                    Suffix(fileDir, fileName).
                    Do().Raw()
            } else {
-               contents, err = client.Get().
+               contents, err = client.Core().RESTClient().Get().
                    Prefix("proxy").
                    Resource("pods").
                    Namespace(pod.Namespace).
@@ -223,7 +223,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client
func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {
    By("submitting the pod to kubernetes")
-   podClient := f.Client.Pods(f.Namespace.Name)
+   podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
    defer func() {
        By("deleting the pod")
        defer GinkgoRecover()
@@ -242,7 +242,7 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string
    }
    // Try to find results for each expected name.
    By("looking for the results for each expected name from probers")
-   assertFilesExist(fileNames, "results", pod, f.Client)
+   assertFilesExist(fileNames, "results", pod, f.ClientSet)
    // TODO: probe from the host, too.
@@ -252,7 +252,7 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string
func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames []string, value string) {
    By("submitting the pod to kubernetes")
-   podClient := f.Client.Pods(f.Namespace.Name)
+   podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
    defer func() {
        By("deleting the pod")
        defer GinkgoRecover()
@@ -271,13 +271,13 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames
    }
    // Try to find the expected value for each expected name.
    By("looking for the results for each expected name from probers")
-   assertFilesContain(fileNames, "results", pod, f.Client, true, value)
+   assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
    framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func verifyDNSPodIsRunning(f *framework.Framework) {
-   systemClient := f.Client.Pods(api.NamespaceSystem)
+   systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
    By("Waiting for DNS Service to be Running")
    options := api.ListOptions{LabelSelector: dnsServiceLabelSelector}
    dnsPods, err := systemClient.List(options)
@@ -288,7 +288,7 @@ func verifyDNSPodIsRunning(f *framework.Framework) {
        framework.Failf("No pods match the label selector %v", dnsServiceLabelSelector.String())
    }
    pod := dnsPods.Items[0]
-   framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, &pod))
+   framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod))
}
func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *api.Service {
@@ -358,21 +358,21 @@ var _ = framework.KubeDescribe("DNS", func() {
            "dns-test": "true",
        }
        headlessService := createServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
-       _, err := f.Client.Services(f.Namespace.Name).Create(headlessService)
+       _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
        Expect(err).NotTo(HaveOccurred())
        defer func() {
            By("deleting the test headless service")
            defer GinkgoRecover()
-           f.Client.Services(f.Namespace.Name).Delete(headlessService.Name)
+           f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
        }()
        regularService := createServiceSpec("test-service-2", "", false, testServiceSelector)
-       regularService, err = f.Client.Services(f.Namespace.Name).Create(regularService)
+       regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService)
        Expect(err).NotTo(HaveOccurred())
        defer func() {
            By("deleting the test service")
            defer GinkgoRecover()
-           f.Client.Services(f.Namespace.Name).Delete(regularService.Name)
+           f.ClientSet.Core().Services(f.Namespace.Name).Delete(regularService.Name, nil)
        }()
        // All the names we need to be able to resolve.
@@ -408,12 +408,12 @@ var _ = framework.KubeDescribe("DNS", func() {
        serviceName := "dns-test-service-2"
        podHostname := "dns-querier-2"
        headlessService := createServiceSpec(serviceName, "", true, testServiceSelector)
-       _, err := f.Client.Services(f.Namespace.Name).Create(headlessService)
+       _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
        Expect(err).NotTo(HaveOccurred())
        defer func() {
            By("deleting the test headless service")
            defer GinkgoRecover()
-           f.Client.Services(f.Namespace.Name).Delete(headlessService.Name)
+           f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
        }()
        hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
@@ -441,12 +441,12 @@ var _ = framework.KubeDescribe("DNS", func() {
        By("Creating a test externalName service")
        serviceName := "dns-test-service-3"
        externalNameService := createServiceSpec(serviceName, "foo.example.com", false, nil)
-       _, err := f.Client.Services(f.Namespace.Name).Create(externalNameService)
+       _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService)
        Expect(err).NotTo(HaveOccurred())
        defer func() {
            By("deleting the test externalName service")
            defer GinkgoRecover()
-           f.Client.Services(f.Namespace.Name).Delete(externalNameService.Name)
+           f.ClientSet.Core().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
        }()
        hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
@@ -463,7 +463,7 @@ var _ = framework.KubeDescribe("DNS", func() {
        // Test changing the externalName field
        By("changing the externalName to bar.example.com")
-       _, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
+       _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) {
            s.Spec.ExternalName = "bar.example.com"
        })
        Expect(err).NotTo(HaveOccurred())
@@ -480,7 +480,7 @@ var _ = framework.KubeDescribe("DNS", func() {
        // Test changing type from ExternalName to ClusterIP
        By("changing the service to type=ClusterIP")
-       _, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
+       _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) {
            s.Spec.Type = api.ServiceTypeClusterIP
            s.Spec.ClusterIP = "127.1.2.3"
            s.Spec.Ports = []api.ServicePort{
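The raw proxy reads in this file change shape but not behavior: the request builder now hangs off Core().RESTClient(). A minimal sketch of the subresource-proxy variant shown above; readResultFile and the "results" directory name are illustrative:

package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// readResultFile (hypothetical helper) issues the same kind of pod-proxy GET
// as assertFilesContain above, via the clientset's REST client.
func readResultFile(c clientset.Interface, pod *api.Pod, fileName string) ([]byte, error) {
	return c.Core().RESTClient().Get().
		Namespace(pod.Namespace).
		Resource("pods").
		SubResource("proxy").
		Name(pod.Name).
		Suffix("results", fileName).
		Do().Raw()
}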

View File

@@ -94,7 +94,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
        framework.Failf("Failed to setup provider config: %v", err)
    }
-   c, err := framework.LoadClient()
+   c, err := framework.LoadInternalClientset()
    if err != nil {
        glog.Fatal("Error loading client: ", err)
    }

View File

@@ -28,7 +28,7 @@ var _ = framework.KubeDescribe("[Feature:Empty]", func() {
    f := framework.NewDefaultFramework("empty")
    BeforeEach(func() {
-       c := f.Client
+       c := f.ClientSet
        ns := f.Namespace.Name
        // TODO: respect --allow-notready-nodes flag in those functions.

View File

@@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
        }
        var err error
-       if secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret); err != nil {
+       if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
            framework.Failf("unable to create test secret %s: %v", secret.Name, err)
        }
@@ -124,11 +124,11 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
        defer func() {
            By("Cleaning up the secret")
-           if err := f.Client.Secrets(f.Namespace.Name).Delete(secret.Name); err != nil {
+           if err := f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
                framework.Failf("unable to delete secret %v: %v", secret.Name, err)
            }
            By("Cleaning up the git vol pod")
-           if err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
+           if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
                framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
            }
        }()
@@ -216,17 +216,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
        },
    }
-   if gitServerSvc, err = f.Client.Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
+   if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
        framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
    }
    return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
        By("Cleaning up the git server pod")
-       if err := f.Client.Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil {
+       if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil {
            framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
        }
        By("Cleaning up the git server svc")
-       if err := f.Client.Services(f.Namespace.Name).Delete(gitServerSvc.Name); err != nil {
+       if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
            framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
        }
    }
@@ -266,7 +266,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
                "data-1": "value-1",
            },
        }
-       _, err := f.Client.ConfigMaps(f.Namespace.Name).Create(configMap)
+       _, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap)
        framework.ExpectNoError(err)
    }
    return
@@ -275,7 +275,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
    By("Cleaning up the configMaps")
    for _, configMapName := range configMapNames {
-       err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMapName)
+       err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
        Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName)
    }
}
@@ -361,15 +361,15 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
            },
        },
    }
-   _, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(rc)
+   _, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc)
    Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
    defer func() {
-       err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
+       err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
        framework.ExpectNoError(err)
    }()
-   pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, rcName, podCount)
+   pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
    By("Ensuring each pod is running")

View File

@@ -42,10 +42,10 @@ var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {
        framework.SkipUnlessProviderIs("gce")
        Expect(framework.RunRC(testutils.RCConfig{
-           Client: f.Client,
+           Client: f.ClientSet,
            Name: "baz",
            Namespace: f.Namespace.Name,
-           Image: framework.GetPauseImageName(f.Client),
+           Image: framework.GetPauseImageName(f.ClientSet),
            Replicas: 1,
        })).NotTo(HaveOccurred())
    })
@@ -101,7 +101,7 @@ func masterExec(cmd string) {
func checkExistingRCRecovers(f *framework.Framework) {
    By("assert that the pre-existing replication controller recovers")
-   podClient := f.Client.Pods(f.Namespace.Name)
+   podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
    rcSelector := labels.Set{"name": "baz"}.AsSelector()
    By("deleting pods from existing replication controller")

View File

@@ -37,7 +37,7 @@ var _ = framework.KubeDescribe("Events", func() {
    It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() {
-       podClient := f.Client.Pods(f.Namespace.Name)
+       podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
        By("creating the pod")
        name := "send-events-" + string(uuid.NewUUID())
@@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Events", func() {
            "source": api.DefaultSchedulerName,
        }.AsSelector()
        options := api.ListOptions{FieldSelector: selector}
-       events, err := f.Client.Events(f.Namespace.Name).List(options)
+       events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
        if err != nil {
            return false, err
        }
@@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Events", func() {
            "source": "kubelet",
        }.AsSelector()
        options := api.ListOptions{FieldSelector: selector}
-       events, err = f.Client.Events(f.Namespace.Name).List(options)
+       events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options)
        if err != nil {
            return false, err
        }
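Both event queries here keep their field selectors and only swap the client handle. A compact sketch of the scheduler-side query, assuming a fields.Set selector like the ones these hunks build; the involvedObject keys shown are the conventional ones and are illustrative:

package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
)

// schedulerEventsFor (hypothetical helper) lists the scheduler's events for a
// pod through Core(), mirroring the query above.
func schedulerEventsFor(c clientset.Interface, ns, podName string) (*api.EventList, error) {
	selector := fields.Set{
		"involvedObject.name":      podName, // assumed key, matching the elided selector fields
		"involvedObject.namespace": ns,
		"source":                   api.DefaultSchedulerName,
	}.AsSelector()
	return c.Core().Events(ns).List(api.ListOptions{FieldSelector: selector})
}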

View File

@@ -22,7 +22,7 @@ import (
    "time"
    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -45,9 +45,9 @@ except:
var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
    f := framework.NewDefaultFramework("cluster-dns")
-   var c *client.Client
+   var c clientset.Interface
    BeforeEach(func() {
-       c = f.Client
+       c = f.ClientSet
    })
    It("should create pod that uses dns", func() {
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
        for _, ns := range namespaces {
            label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
            options := api.ListOptions{LabelSelector: label}
-           pods, err := c.Pods(ns.Name).List(options)
+           pods, err := c.Core().Pods(ns.Name).List(options)
            Expect(err).NotTo(HaveOccurred())
            err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
            Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
        // This code is probably unnecessary, but let's stay on the safe side.
        label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
        options := api.ListOptions{LabelSelector: label}
-       pods, err := c.Pods(namespaces[0].Name).List(options)
+       pods, err := c.Core().Pods(namespaces[0].Name).List(options)
        if err != nil || pods == nil || len(pods.Items) == 0 {
            framework.Failf("no running pods found")

View File

@@ -25,7 +25,7 @@ import (
    "syscall"
    "time"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/test/e2e/framework"
    . "github.com/onsi/ginkgo"
@@ -47,8 +47,8 @@ const (
// readTransactions reads # of transactions from the k8petstore web server endpoint.
// for more details see the source of the k8petstore web server.
-func readTransactions(c *client.Client, ns string) (error, int) {
+func readTransactions(c clientset.Interface, ns string) (error, int) {
-   proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get())
+   proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
    if errProxy != nil {
        return errProxy, -1
    }
@@ -66,7 +66,7 @@ func readTransactions(c *client.Client, ns string) (error, int) {
// runK8petstore runs the k8petstore application, bound to external nodeport, and
// polls until finalTransactionsExpected transactions are acquired, in a maximum of maxSeconds.
-func runK8petstore(restServers int, loadGenerators int, c *client.Client, ns string, finalTransactionsExpected int, maxTime time.Duration) {
+func runK8petstore(restServers int, loadGenerators int, c clientset.Interface, ns string, finalTransactionsExpected int, maxTime time.Duration) {
    var err error = nil
    k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh")
@@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
        loadGenerators := nodeCount
        restServers := nodeCount
        fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
-       runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
+       runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
    })
})

View File

@@ -27,7 +27,7 @@ import (
    "time"
    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/test/e2e/framework"
@@ -56,10 +56,10 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
    forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
        clusterState(selectorKey, selectorValue).ForEach(fn)
    }
-   var c *client.Client
+   var c clientset.Interface
    var ns string
    BeforeEach(func() {
-       c = f.Client
+       c = f.ClientSet
        ns = f.Namespace.Name
    })
@@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
        label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
        err = wait.PollImmediate(petsetPoll, petsetTimeout,
            func() (bool, error) {
-               podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: label})
+               podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label})
                if err != nil {
                    return false, fmt.Errorf("Unable to get list of pods in petset %s", label)
                }
@@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
        err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
        Expect(err).NotTo(HaveOccurred())
        for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
-           pod, err := c.Pods(ns).Get(podName)
+           pod, err := c.Core().Pods(ns).Get(podName)
            framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
            stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
            framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
@@ -504,7 +504,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
        Expect(err).NotTo(HaveOccurred())
        By("scaling rethinkdb")
-       framework.ScaleRC(c, f.ClientSet, ns, "rethinkdb-rc", 2, true)
+       framework.ScaleRC(f.ClientSet, ns, "rethinkdb-rc", 2, true)
        checkDbInstances()
        By("starting admin")
@@ -547,7 +547,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
        Expect(err).NotTo(HaveOccurred())
        By("scaling hazelcast")
-       framework.ScaleRC(c, f.ClientSet, ns, "hazelcast", 2, true)
+       framework.ScaleRC(f.ClientSet, ns, "hazelcast", 2, true)
        forEachPod("name", "hazelcast", func(pod api.Pod) {
            _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
            Expect(err).NotTo(HaveOccurred())
@@ -556,11 +556,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
    })
})
-func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) {
+func makeHttpRequestToService(c clientset.Interface, ns, service, path string, timeout time.Duration) (string, error) {
    var result []byte
    var err error
    for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
-       proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get())
+       proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
        if errProxy != nil {
            break
        }
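makeHttpRequestToService here and readTransactions in the pet store test share the same proxy plumbing. A sketch of one GET through that path, assuming GetServicesProxyRequest returns a restclient request builder as its use above implies; getViaServiceProxy, the service name, and the path are illustrative:

package sketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// getViaServiceProxy (hypothetical helper) performs the GET both helpers above
// build: the clientset now supplies the base request via Core().RESTClient().
func getViaServiceProxy(c clientset.Interface, ns, service, path string) ([]byte, error) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
	if errProxy != nil {
		return nil, errProxy
	}
	// Builder chain assumed from the elided body of makeHttpRequestToService.
	return proxyRequest.Namespace(ns).
		Name(service).
		Suffix(path).
		Do().Raw()
}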

View File

@@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
    })
    It("should be created and deleted successfully", func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
        nsName := f.FederationNamespace.Name
        ingress := createIngressOrFail(f.FederationClientset_1_5, nsName)
@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
    // register clusters in federation apiserver
    BeforeEach(func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
        if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
            federationName = DefaultFederationName
@@ -124,7 +124,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
    )
    BeforeEach(func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        // create backend pod
        createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
        // create backend service

View File

@@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
    var clusters map[string]*cluster // All clusters, keyed by cluster name
    BeforeEach(func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        // TODO: Federation API server should be able to answer this.
        if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
@@ -56,7 +56,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
    })
    AfterEach(func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        deleteAllTestNamespaces(
            f.FederationClientset_1_5.Core().Namespaces().List,
            f.FederationClientset_1_5.Core().Namespaces().Delete)
@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
    })
    It("should be created and deleted successfully", func() {
-       framework.SkipUnlessFederated(f.Client)
+       framework.SkipUnlessFederated(f.ClientSet)
        ns := api_v1.Namespace{
            ObjectMeta: api_v1.ObjectMeta{

View File

@@ -46,18 +46,18 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation12]", func
    Describe("Secret objects", func() {
        BeforeEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            clusters = map[string]*cluster{}
            registerClusters(clusters, UserAgentName, "", f)
        })
        AfterEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            unregisterClusters(clusters, f)
        })
        It("should be created and deleted successfully", func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName := f.FederationNamespace.Name
            secret := createSecretOrFail(f.FederationClientset_1_5, nsName)
            defer func() { // Cleanup

View File

@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
    var _ = Describe("Federated Services", func() {
        BeforeEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            // TODO: Federation API server should be able to answer this.
            if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
@@ -70,12 +70,12 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        )
        BeforeEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            // Placeholder
        })
        AfterEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            if service != nil {
                By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName))
@@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        })
        It("should succeed", func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName = f.FederationNamespace.Name
            service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        })
        It("should create matching services in underlying clusters", func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName = f.FederationNamespace.Name
            service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
@@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        )
        BeforeEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName := f.FederationNamespace.Name
            createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        })
        AfterEach(func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName := f.FederationNamespace.Name
            deleteBackendPodsOrFail(clusters, nsName)
@@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        })
        It("should be able to discover a federated service", func() {
-           framework.SkipUnlessFederated(f.Client)
+           framework.SkipUnlessFederated(f.ClientSet)
            nsName := f.FederationNamespace.Name
            svcDNSNames := []string{
@@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
        Context("non-local federated service", func() {
            BeforeEach(func() {
-               framework.SkipUnlessFederated(f.Client)
+               framework.SkipUnlessFederated(f.ClientSet)
                // Delete all the backend pods from the shard which is local to the discovery pod.
                deleteOneBackendPodOrFail(clusters[primaryClusterName])
@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
            })
            It("should be able to discover a non-local federated service", func() {
-               framework.SkipUnlessFederated(f.Client)
+               framework.SkipUnlessFederated(f.ClientSet)
                nsName := f.FederationNamespace.Name
                svcDNSNames := []string{
@@ -190,7 +190,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
            // TTL and/or running the pods in parallel.
            Context("[Slow] missing local service", func() {
                It("should never find DNS entries for a missing local service", func() {
-                   framework.SkipUnlessFederated(f.Client)
+                   framework.SkipUnlessFederated(f.ClientSet)
                    nsName := f.FederationNamespace.Name
                    localSvcDNSNames := []string{

@@ -36,7 +36,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
 Describe("Cluster objects", func() {
 AfterEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 // Delete registered clusters.
 // This is if a test failed, it should not affect other tests.
@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
 })
 It("should be created and deleted successfully", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 contexts := f.GetUnderlyingFederatedContexts()
@@ -85,11 +85,11 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
 })
 Describe("Admission control", func() {
 AfterEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 })
 It("should not be able to create resources if namespace does not exist", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 // Creating a service in a non-existing namespace should fail.
 svcNamespace := "federation-admission-test-ns"
@@ -34,11 +34,11 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 var _ = Describe("Federation API server authentication", func() {
 BeforeEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 })
 It("should accept cluster resources when the client has right authentication credentials", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 nsName := f.FederationNamespace.Name
 svc := createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
@@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 })
 It("should not accept cluster resources when the client has invalid authentication credentials", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 contexts := f.GetUnderlyingFederatedContexts()
@@ -67,7 +67,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 })
 It("should not accept cluster resources when the client has no authentication credentials", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 fcs, err := invalidAuthFederationClientSet(nil)
 ExpectNoError(err)
@@ -37,7 +37,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func()
 Describe("Event objects", func() {
 AfterEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 nsName := f.FederationNamespace.Name
 // Delete registered events.
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func()
 })
 It("should be created and deleted successfully", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 nsName := f.FederationNamespace.Name
 event := createEventOrFail(f.FederationClientset_1_5, nsName)
@@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu
 Describe("ReplicaSet objects", func() {
 AfterEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 // Delete registered replicasets.
 nsName := f.FederationNamespace.Name
@@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu
 })
 It("should be created and deleted successfully", func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 nsName := f.FederationNamespace.Name
 replicaset := createReplicaSetOrFail(f.FederationClientset_1_5, nsName)
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu
 federationName string
 )
 BeforeEach(func() {
-framework.SkipUnlessFederated(f.Client)
+framework.SkipUnlessFederated(f.ClientSet)
 if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
 federationName = DefaultFederationName
 }
@@ -383,7 +383,7 @@ func podExitCodeDetector(f *framework.Framework, name, namespace string, code in
 }
 return func() error {
-pod, err := f.Client.Pods(namespace).Get(name)
+pod, err := f.ClientSet.Core().Pods(namespace).Get(name)
 if err != nil {
 return logerr(err)
 }
@@ -392,7 +392,7 @@ func podExitCodeDetector(f *framework.Framework, name, namespace string, code in
 }
 // Best effort attempt to grab pod logs for debugging
-logs, err = framework.GetPodLogs(f.Client, namespace, name, pod.Spec.Containers[0].Name)
+logs, err = framework.GetPodLogs(f.ClientSet, namespace, name, pod.Spec.Containers[0].Name)
 if err != nil {
 framework.Logf("Cannot fetch pod logs: %v", err)
 }
@@ -431,12 +431,12 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
 nsName := f.FederationNamespace.Name
 By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, nsName))
-_, err := f.Client.Pods(nsName).Create(pod)
+_, err := f.ClientSet.Core().Pods(nsName).Create(pod)
 framework.ExpectNoError(err, "Trying to create pod to run %q", command)
 By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
 defer func() {
 By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
-err := f.Client.Pods(nsName).Delete(podName, api.NewDeleteOptions(0))
+err := f.ClientSet.Core().Pods(nsName).Delete(podName, api.NewDeleteOptions(0))
 framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
 By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
 }()
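Note: the hunks above show the substitution this commit applies throughout: pod operations move from the unversioned f.Client.Pods(ns) to the typed core group of the generated clientset, f.ClientSet.Core().Pods(ns). A minimal sketch of the resulting call pattern, assuming an already-constructed internalclientset.Interface; the helper name and flow are illustrative, not part of the commit:

// cyclePod is a hypothetical helper: it creates, fetches, and immediately
// deletes a pod through the typed core group, matching the calls above.
func cyclePod(cs internalclientset.Interface, ns string, pod *api.Pod) error {
	created, err := cs.Core().Pods(ns).Create(pod)
	if err != nil {
		return err
	}
	if _, err := cs.Core().Pods(ns).Get(created.Name); err != nil {
		return err
	}
	// A zero grace period forces immediate deletion, as in the diff above.
	return cs.Core().Pods(ns).Delete(created.Name, api.NewDeleteOptions(0))
}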

@@ -47,7 +47,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
 var stdout, stderr bytes.Buffer
 var stdin io.Reader
 tty := false
-req := f.Client.RESTClient.Post().
+req := f.ClientSet.Core().RESTClient().Post().
 Resource("pods").
 Name(podName).
 Namespace(f.Namespace.Name).
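Note: code that previously reached into the unversioned client's embedded RESTClient struct field now goes through an accessor method on the typed core group. A sketch of building the same request, assuming an internalclientset.Interface; the helper name is illustrative:

// buildPodPostRequest is a hypothetical helper returning the request
// builder shown above, obtained via the RESTClient() accessor.
func buildPodPostRequest(cs internalclientset.Interface, ns, podName string) *restclient.Request {
	return cs.Core().RESTClient().Post().
		Resource("pods").
		Name(podName).
		Namespace(ns)
}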

@@ -39,7 +39,6 @@ import (
 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/client/typed/dynamic"
-client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/metrics"
@@ -61,9 +60,6 @@ const (
 type Framework struct {
 BaseName string
-// Client is manually created and should not be used unless absolutely necessary. Use ClientSet_1_5
-// where possible.
-Client *client.Client
 // ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
 ClientSet internalclientset.Interface
@@ -134,12 +130,12 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G
 return f
 }
-func NewFramework(baseName string, options FrameworkOptions, client *client.Client) *Framework {
+func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework {
 f := &Framework{
 BaseName: baseName,
 AddonResourceConstraints: make(map[string]ResourceConstraint),
 options: options,
-Client: client,
+ClientSet: client,
 }
 BeforeEach(f.BeforeEach)
@@ -185,7 +181,7 @@ func (f *Framework) BeforeEach() {
 // The fact that we need this feels like a bug in ginkgo.
 // https://github.com/onsi/ginkgo/issues/222
 f.cleanupHandle = AddCleanupAction(f.AfterEach)
-if f.Client == nil {
+if f.ClientSet == nil {
 By("Creating a kubernetes client")
 config, err := LoadConfig()
 Expect(err).NotTo(HaveOccurred())
@@ -197,9 +193,6 @@ func (f *Framework) BeforeEach() {
 if TestContext.KubeAPIContentType != "" {
 config.ContentType = TestContext.KubeAPIContentType
 }
-c, err := loadClientFromConfig(config)
-Expect(err).NotTo(HaveOccurred())
-f.Client = c
 f.ClientSet, err = internalclientset.NewForConfig(config)
 Expect(err).NotTo(HaveOccurred())
 f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
@@ -239,14 +232,14 @@ func (f *Framework) BeforeEach() {
 if TestContext.VerifyServiceAccount {
 By("Waiting for a default service account to be provisioned in namespace")
-err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
+err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
 Expect(err).NotTo(HaveOccurred())
 } else {
 Logf("Skipping waiting for service account")
 }
 if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
-f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{
+f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
 inKubemark: ProviderIs("kubemark"),
 masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
 })
@@ -261,7 +254,7 @@ func (f *Framework) BeforeEach() {
 f.logsSizeWaitGroup = sync.WaitGroup{}
 f.logsSizeWaitGroup.Add(1)
 f.logsSizeCloseChannel = make(chan bool)
-f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel)
+f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
 go func() {
 f.logsSizeVerifier.Run()
 f.logsSizeWaitGroup.Done()
@@ -326,7 +319,7 @@ func (f *Framework) AfterEach() {
 if f.NamespaceDeletionTimeout != 0 {
 timeout = f.NamespaceDeletionTimeout
 }
-if err := deleteNS(f.Client, f.ClientPool, ns.Name, timeout); err != nil {
+if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
 if !apierrs.IsNotFound(err) {
 nsDeletionErrors[ns.Name] = err
 } else {
@@ -348,7 +341,7 @@ func (f *Framework) AfterEach() {
 // Paranoia-- prevent reuse!
 f.Namespace = nil
 f.FederationNamespace = nil
-f.Client = nil
+f.ClientSet = nil
 f.namespacesToDelete = nil
 // if we had errors deleting, report them now.
@@ -376,18 +369,18 @@ func (f *Framework) AfterEach() {
 // Print events if the test failed.
 if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
-DumpAllNamespaceInfo(f.Client, f.ClientSet_1_5, f.Namespace.Name)
+DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name)
 By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
-LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
+LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
 if f.federated {
 // Dump federation events in federation namespace.
 DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
 return f.FederationClientset_1_5.Core().Events(ns).List(opts)
 }, f.FederationNamespace.Name)
 // Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
-LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"}, Logf)
+LogPodsWithLabels(f.ClientSet, "federation", map[string]string{"app": "federated-cluster"}, Logf)
 // Print logs of kube-dns pod
-LogPodsWithLabels(f.Client, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf)
+LogPodsWithLabels(f.ClientSet, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf)
 }
 }
@@ -407,7 +400,7 @@ func (f *Framework) AfterEach() {
 if TestContext.GatherMetricsAfterTest {
 By("Gathering metrics")
 // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered.
-grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true)
+grabber, err := metrics.NewMetricsGrabber(f.ClientSet, true, false, false, true)
 if err != nil {
 Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
 } else {
@@ -441,7 +434,7 @@ func (f *Framework) AfterEach() {
 // Check whether all nodes are ready after the test.
 // This is explicitly done at the very end of the test, to avoid
 // e.g. not removing namespace in case of this failure.
-if err := AllNodesReady(f.Client, 3*time.Minute); err != nil {
+if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
 Failf("All nodes should be ready after test, %v", err)
 }
 }
@@ -451,7 +444,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
 if createTestingNS == nil {
 createTestingNS = CreateTestingNS
 }
-ns, err := createTestingNS(baseName, f.Client, labels)
+ns, err := createTestingNS(baseName, f.ClientSet, labels)
 if err == nil {
 f.namespacesToDelete = append(f.namespacesToDelete, ns)
 }
@@ -483,29 +476,29 @@ func (f *Framework) createFederationNamespace(baseName string) (*v1.Namespace, e
 // WaitForPodTerminated waits for the pod to be terminated with the given reason.
 func (f *Framework) WaitForPodTerminated(podName, reason string) error {
-return waitForPodTerminatedInNamespace(f.Client, podName, reason, f.Namespace.Name)
+return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
 }
 // WaitForPodRunning waits for the pod to run in the namespace.
 func (f *Framework) WaitForPodRunning(podName string) error {
-return WaitForPodNameRunningInNamespace(f.Client, podName, f.Namespace.Name)
+return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 }
 // WaitForPodReady waits for the pod to flip to ready in the namespace.
 func (f *Framework) WaitForPodReady(podName string) error {
-return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, "", PodStartTimeout)
+return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, "", PodStartTimeout)
 }
 // WaitForPodRunningSlow waits for the pod to run in the namespace.
 // It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout).
 func (f *Framework) WaitForPodRunningSlow(podName string) error {
-return waitForPodRunningInNamespaceSlow(f.Client, podName, f.Namespace.Name, "")
+return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name, "")
 }
 // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
 // success or failure.
 func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
-return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name, "")
+return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name, "")
 }
 // TestContainerOutput runs the given pod in the given namespace and waits
@@ -528,7 +521,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
 for {
 // TODO: Endpoints client should take a field selector so we
 // don't have to list everything.
-list, err := f.Client.Endpoints(f.Namespace.Name).List(api.ListOptions{})
+list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{})
 if err != nil {
 return err
 }
@@ -547,7 +540,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
 FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector(),
 ResourceVersion: rv,
 }
-w, err := f.Client.Endpoints(f.Namespace.Name).Watch(options)
+w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
 if err != nil {
 return err
 }
@@ -613,7 +606,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int,
 theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
 f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
 if block {
-err = testutils.WaitForPodsWithLabelRunning(f.Client, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
+err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
 }
 return err, theService
 }
@@ -641,7 +634,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 }
 }
 Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
-service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
+service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
 ObjectMeta: api.ObjectMeta{
 Name: "service-for-" + appName,
 Labels: map[string]string{
@@ -667,7 +660,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 // one per node, but no more than maxCount.
 if i <= maxCount {
 Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
-_, err := f.Client.Pods(f.Namespace.Name).Create(&api.Pod{
+_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: fmt.Sprintf(appName+"-pod-%v", i),
 Labels: labels,
@@ -852,14 +845,14 @@ type PodStateVerification struct {
 }
 type ClusterVerification struct {
-client *client.Client
+client internalclientset.Interface
 namespace *api.Namespace // pointer rather than string, since ns isn't created until before each.
 podState PodStateVerification
 }
 func (f *Framework) NewClusterVerification(filter PodStateVerification) *ClusterVerification {
 return &ClusterVerification{
-f.Client,
+f.ClientSet,
 f.Namespace,
 filter,
 }
@@ -894,7 +887,7 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
 }
 // filterLabels returns a list of pods which have labels.
-func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*api.PodList, error) {
+func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) {
 var err error
 var selector labels.Selector
 var pl *api.PodList
@@ -903,9 +896,9 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*
 if len(selectors) > 0 {
 selector = labels.SelectorFromSet(labels.Set(selectors))
 options := api.ListOptions{LabelSelector: selector}
-pl, err = cli.Pods(ns).List(options)
+pl, err = cli.Core().Pods(ns).List(options)
 } else {
-pl, err = cli.Pods(ns).List(api.ListOptions{})
+pl, err = cli.Core().Pods(ns).List(api.ListOptions{})
 }
 return pl, err
 }
@@ -913,7 +906,7 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*
 // filter filters pods which pass a filter. It can be used to compose
 // the more useful abstractions like ForEach, WaitFor, and so on, which
 // can be used directly by tests.
-func (p *PodStateVerification) filter(c *client.Client, namespace *api.Namespace) ([]api.Pod, error) {
+func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) {
 if len(p.ValidPhases) == 0 || namespace == nil {
 panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
 }
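Note: with the Client field deleted, the framework hands out exactly one internal client, ClientSet, built with internalclientset.NewForConfig as in the hunk above. A minimal sketch of that bootstrap, assuming in-cluster configuration (the framework itself goes through its LoadConfig helper, and the function name here is illustrative):

// newInternalClientset is a hypothetical condensation of the framework's
// BeforeEach wiring: load a rest config, then build the generated clientset.
func newInternalClientset() (internalclientset.Interface, error) {
	config, err := restclient.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return internalclientset.NewForConfig(config)
}

Typed calls then hang off the group accessors, for example cs.Core().Pods(ns).List(api.ListOptions{}).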

@@ -30,7 +30,7 @@ import (
 cadvisorapi "github.com/google/cadvisor/info/v1"
 "github.com/prometheus/common/model"
 "k8s.io/kubernetes/pkg/api"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -63,7 +63,7 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].
 // If a apiserver client is passed in, the function will try to get kubelet metrics from metrics grabber;
 // or else, the function will try to get kubelet metrics directly from the node.
-func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) {
+func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
 if c == nil {
 return metrics.GrabKubeletMetricsWithoutProxy(nodeName)
 }
@@ -76,7 +76,7 @@ func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.Kubel
 // getKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
 // the subsystem prefix.
-func getKubeletMetrics(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) {
+func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
 ms, err := getKubeletMetricsFromNode(c, nodeName)
 if err != nil {
 return metrics.KubeletMetrics{}, err
@@ -138,7 +138,7 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
 // RuntimeOperationMonitor is the tool getting and parsing docker operation metrics.
 type RuntimeOperationMonitor struct {
-client *client.Client
+client clientset.Interface
 nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate
 }
@@ -152,12 +152,12 @@ type RuntimeOperationErrorRate struct {
 TimeoutRate float64
 }
-func NewRuntimeOperationMonitor(c *client.Client) *RuntimeOperationMonitor {
+func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
 m := &RuntimeOperationMonitor{
 client: c,
 nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
 }
-nodes, err := m.client.Nodes().List(api.ListOptions{})
+nodes, err := m.client.Core().Nodes().List(api.ListOptions{})
 if err != nil {
 Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
 }
@@ -224,7 +224,7 @@ func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperation
 }
 // getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node.
-func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntimeOperationErrorRate, error) {
+func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) {
 result := make(NodeRuntimeOperationErrorRate)
 ms, err := getKubeletMetrics(c, node)
 if err != nil {
@@ -256,7 +256,7 @@ func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntim
 }
 // HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
-func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
+func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
 ms, err := getKubeletMetrics(c, nodeName)
 if err != nil {
 return KubeletLatencyMetrics{}, err
@@ -278,19 +278,19 @@ func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nod
 // in the returned ContainerInfo is subject to the requirements in statsRequest.
 // TODO: This function uses the deprecated kubelet stats API; it should be
 // removed.
-func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
+func getContainerInfo(c clientset.Interface, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
 reqBody, err := json.Marshal(req)
 if err != nil {
 return nil, err
 }
-subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
+subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
 if err != nil {
 return nil, err
 }
 var data []byte
 if subResourceProxyAvailable {
-data, err = c.Post().
+data, err = c.Core().RESTClient().Post().
 Resource("nodes").
 SubResource("proxy").
 Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -300,7 +300,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
 Do().Raw()
 } else {
-data, err = c.Post().
+data, err = c.Core().RESTClient().Post().
 Prefix("proxy").
 Resource("nodes").
 Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -344,7 +344,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
 // TODO: This function relies on the deprecated kubelet stats API and should be
 // removed and/or rewritten.
 func getOneTimeResourceUsageOnNode(
-c *client.Client,
+c clientset.Interface,
 nodeName string,
 cpuInterval time.Duration,
 containerNames func() []string,
@@ -400,15 +400,15 @@ func getOneTimeResourceUsageOnNode(
 return usageMap, nil
 }
-func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) {
+func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
-subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
+subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
 if err != nil {
 return nil, err
 }
 var data []byte
 if subResourceProxyAvailable {
-data, err = c.Get().
+data, err = c.Core().RESTClient().Get().
 Resource("nodes").
 SubResource("proxy").
 Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -417,7 +417,7 @@ func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, err
 Do().Raw()
 } else {
-data, err = c.Get().
+data, err = c.Core().RESTClient().Get().
 Prefix("proxy").
 Resource("nodes").
 Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -515,7 +515,7 @@ type usageDataPerContainer struct {
 memWorkSetData []uint64
 }
-func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) {
+func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
 client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
 if err != nil {
 return "", err
@@ -531,7 +531,7 @@ func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) {
 return strings.Join(lines[len(lines)-numLines:], "\n"), nil
 }
-func PrintAllKubeletPods(c *client.Client, nodeName string) {
+func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
 podList, err := GetKubeletPods(c, nodeName)
 if err != nil {
 Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
@@ -565,13 +565,13 @@ type resourceCollector struct {
 lock sync.RWMutex
 node string
 containers []string
-client *client.Client
+client clientset.Interface
 buffers map[string][]*ContainerResourceUsage
 pollingInterval time.Duration
 stopCh chan struct{}
 }
-func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
+func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
 buffers := make(map[string][]*ContainerResourceUsage)
 return &resourceCollector{
 node: nodeName,
@@ -679,13 +679,13 @@ func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]f
 // ResourceMonitor manages a resourceCollector per node.
 type ResourceMonitor struct {
-client *client.Client
+client clientset.Interface
 containers []string
 pollingInterval time.Duration
 collectors map[string]*resourceCollector
 }
-func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
+func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
 return &ResourceMonitor{
 containers: containerNames,
 client: c,
@@ -695,7 +695,7 @@ func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterv
 func (r *ResourceMonitor) Start() {
 // It should be OK to monitor unschedulable Nodes
-nodes, err := r.client.Nodes().List(api.ListOptions{})
+nodes, err := r.client.Core().Nodes().List(api.ListOptions{})
 if err != nil {
 Failf("ResourceMonitor: unable to get list of nodes: %v", err)
 }
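Note: version gates such as ServerVersionGTE no longer take the client itself; they take the discovery client exposed by the clientset, as the c.Discovery() arguments above show. A sketch of a discovery call, assuming a clientset.Interface; the helper name is illustrative:

// logServerVersion is a hypothetical helper: Discovery() serves version
// and API metadata that the unversioned client used to answer directly.
func logServerVersion(c clientset.Interface) error {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		return err
	}
	Logf("apiserver version: %s", v.GitVersion)
	return nil
}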

@@ -26,7 +26,6 @@ import (
 "time"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
 )
 const (
@@ -65,8 +64,7 @@ type LogSizeGatherer struct {
 // LogsSizeVerifier gathers data about log files sizes from master and node machines.
 // It oversees a <workersNo> workers which do the gathering.
 type LogsSizeVerifier struct {
-client *client.Client
+client clientset.Interface
-clientset clientset.Interface
 stopChannel chan bool
 // data stores LogSizeData groupped per IP and log_path
 data *LogsSizeData
@@ -144,8 +142,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
 }
 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
-func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
+func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
-nodeAddresses, err := NodeSSHHosts(cs)
+nodeAddresses, err := NodeSSHHosts(c)
 ExpectNoError(err)
 masterAddress := GetMasterHost() + ":22"
@@ -154,7 +152,6 @@ func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan
 verifier := &LogsSizeVerifier{
 client: c,
-clientset: cs,
 stopChannel: stopChannel,
 data: prepareData(masterAddress, nodeAddresses),
 masterAddress: masterAddress,
@@ -28,7 +28,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/pkg/metrics"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -205,7 +205,7 @@ func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration)
 }
 }
-func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
+func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
 var a APIResponsiveness
 body, err := getMetrics(c)
@@ -247,7 +247,7 @@ func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
 // Prints top five summary metrics for request types with latency and returns
 // number of such request types above threshold.
-func HighLatencyRequests(c *client.Client) (int, error) {
+func HighLatencyRequests(c clientset.Interface) (int, error) {
 metrics, err := readLatencyMetrics(c)
 if err != nil {
 return 0, err
@@ -297,9 +297,9 @@ func VerifyPodStartupLatency(latency PodStartupLatency) error {
 }
 // Resets latency metrics in apiserver.
-func ResetMetrics(c *client.Client) error {
+func ResetMetrics(c clientset.Interface) error {
 Logf("Resetting latency metrics in apiserver...")
-body, err := c.Delete().AbsPath("/metrics").DoRaw()
+body, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw()
 if err != nil {
 return err
 }
@@ -310,8 +310,8 @@ func ResetMetrics(c *client.Client) error {
 }
 // Retrieves metrics information.
-func getMetrics(c *client.Client) (string, error) {
+func getMetrics(c clientset.Interface) (string, error) {
-body, err := c.Get().AbsPath("/metrics").DoRaw()
+body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
 if err != nil {
 return "", err
 }
@@ -319,11 +319,11 @@ func getMetrics(c *client.Client) (string, error) {
 }
 // Retrieves scheduler metrics information.
-func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
+func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
 result := SchedulingLatency{}
 // Check if master Node is registered
-nodes, err := c.Nodes().List(api.ListOptions{})
+nodes, err := c.Core().Nodes().List(api.ListOptions{})
 ExpectNoError(err)
 var data string
@@ -334,7 +334,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
 }
 }
 if masterRegistered {
-rawData, err := c.Get().
+rawData, err := c.Core().RESTClient().Get().
 Prefix("proxy").
 Namespace(api.NamespaceSystem).
 Resource("pods").
@@ -383,7 +383,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
 }
 // Verifies (currently just by logging them) the scheduling latencies.
-func VerifySchedulerLatency(c *client.Client) error {
+func VerifySchedulerLatency(c clientset.Interface) error {
 latency, err := getSchedulingLatency(c)
 if err != nil {
 return err
@@ -457,7 +457,7 @@ func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
 // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
 // If latencyDataLag is nil then it will be populated from latencyData
-func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c *client.Client) {
+func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) {
 if latencyDataLag == nil {
 latencyDataLag = latencyData
 }
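Note: raw endpoint access, such as reading or resetting the apiserver's /metrics, also routes through Core().RESTClient(). A sketch mirroring getMetrics above, assuming a clientset.Interface; the function name is illustrative:

// fetchRawMetrics is a hypothetical twin of getMetrics: a raw GET of
// /metrics through the clientset's REST client.
func fetchRawMetrics(c clientset.Interface) (string, error) {
	body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}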

@@ -27,7 +27,7 @@ import (
 api "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/rand"
@@ -372,7 +372,7 @@ func (config *NetworkingTestConfig) createNodePortService(selector map[string]st
 }
 func (config *NetworkingTestConfig) DeleteNodePortService() {
-err := config.getServiceClient().Delete(config.NodePortService.Name)
+err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
 Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
 time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
 }
@@ -403,7 +403,7 @@ func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api
 _, err := config.getServiceClient().Create(serviceSpec)
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
-err = WaitForService(config.f.Client, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
+err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
 createdService, err := config.getServiceClient().Get(serviceSpec.Name)
@@ -431,7 +431,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 config.setupCore(selector)
 By("Getting node addresses")
-ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
+ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet))
 nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
 config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
 if len(config.ExternalAddrs) < 2 {
@@ -464,7 +464,7 @@ func (config *NetworkingTestConfig) cleanup() {
 if err == nil {
 for _, ns := range nsList.Items {
 if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
-nsClient.Delete(ns.Name)
+nsClient.Delete(ns.Name, nil)
 }
 }
 }
@@ -482,7 +482,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {
 }
 func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
+ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet))
 nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
 // To make this test work reasonably fast in large clusters,
@@ -520,12 +520,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
 config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
 config.EndpointPods = config.EndpointPods[1:]
 // wait for pod being deleted.
-err := WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
+err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
 if err != nil {
 Failf("Failed to delete %s pod: %v", pod.Name, err)
 }
 // wait for endpoint being removed.
-err = WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
+err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
 if err != nil {
 Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
 }
@@ -544,10 +544,10 @@ func (config *NetworkingTestConfig) getPodClient() *PodClient {
 return config.podClient
 }
-func (config *NetworkingTestConfig) getServiceClient() client.ServiceInterface {
+func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface {
-return config.f.Client.Services(config.Namespace)
+return config.f.ClientSet.Core().Services(config.Namespace)
 }
-func (config *NetworkingTestConfig) getNamespacesClient() client.NamespaceInterface {
+func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
-return config.f.Client.Namespaces()
+return config.f.ClientSet.Core().Namespaces()
 }
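Note: beyond swapping the receiver, the typed clientset changes some method signatures; Delete now takes a second *api.DeleteOptions argument, which is why the calls above become Delete(name, nil). A sketch, assuming a coreclientset.ServiceInterface; the helper name is illustrative:

// deleteWithDefaults is a hypothetical helper: nil options keep the
// server-side deletion defaults, matching Delete(name, nil) above.
func deleteWithDefaults(svcs coreclientset.ServiceInterface, name string) error {
	return svcs.Delete(name, nil)
}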

@@ -23,7 +23,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/util/wait"
 )
@@ -82,7 +82,7 @@ var NodeUpgrade = func(f *Framework, v string, img string) error {
 // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
 // GKE; the operation shouldn't return until they all are.
 Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
-if _, err := CheckNodesReady(f.Client, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
+if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
 return err
 }
 return nil
@@ -139,7 +139,7 @@ func nodeUpgradeGKE(v string, img string) error {
 // CheckNodesReady waits up to nt for expect nodes accessed by c to be ready,
 // returning an error if this doesn't happen in time. It returns the names of
 // nodes it finds.
-func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {
+func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
 // First, keep getting all of the nodes until we get the number we expect.
 var nodeList *api.NodeList
 var errLast error
@@ -148,7 +148,7 @@ func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
 // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
 // knows about all of the nodes. Thus, we retry the list nodes call
 // until we get the expected number of nodes.
-nodeList, errLast = c.Nodes().List(api.ListOptions{
+nodeList, errLast = c.Core().Nodes().List(api.ListOptions{
 FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
 if errLast != nil {
 return false, nil
@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -43,13 +43,13 @@ var ImageWhiteList sets.String
func (f *Framework) PodClient() *PodClient { func (f *Framework) PodClient() *PodClient {
return &PodClient{ return &PodClient{
f: f, f: f,
PodInterface: f.Client.Pods(f.Namespace.Name), PodInterface: f.ClientSet.Core().Pods(f.Namespace.Name),
} }
} }
type PodClient struct { type PodClient struct {
f *Framework f *Framework
unversioned.PodInterface unversionedcore.PodInterface
} }
// Create creates a new pod according to the framework specifications (don't wait for it to start). // Create creates a new pod according to the framework specifications (don't wait for it to start).
@ -116,7 +116,7 @@ func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err) Failf("Failed to delete pod %q: %v", name, err)
} }
Expect(WaitForPodToDisappear(c.f.Client, c.f.Namespace.Name, name, labels.Everything(), Expect(WaitForPodToDisappear(c.f.ClientSet, c.f.Namespace.Name, name, labels.Everything(),
2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name) 2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
} }
@ -156,7 +156,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
// WaitForSuccess waits for pod to success. // WaitForSuccess waits for pod to success.
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f f := c.f
Expect(waitForPodCondition(f.Client, f.Namespace.Name, name, "success or failure", timeout, Expect(waitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *api.Pod) (bool, error) { func(pod *api.Pod) (bool, error) {
switch pod.Status.Phase { switch pod.Status.Phase {
case api.PodFailed: case api.PodFailed:
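The PodClient change above swaps the embedded field from the unversioned package to the generated typed interface; Go's method promotion keeps every call site compiling. A hedged sketch of the same pattern, with hypothetical names:

    // podWrapper embeds the namespaced typed client, so Get, Create, Delete
    // and friends are promoted onto the wrapper unchanged.
    type podWrapper struct {
        unversionedcore.PodInterface
    }

    func newPodWrapper(c clientset.Interface, ns string) *podWrapper {
        return &podWrapper{PodInterface: c.Core().Pods(ns)}
    }

A caller such as newPodWrapper(f.ClientSet, ns).Get("some-pod") resolves through the embedded interface, exactly as PodClient does.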


@ -30,7 +30,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
) )
@ -129,7 +129,7 @@ func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]Resour
} }
type resourceGatherWorker struct { type resourceGatherWorker struct {
c *client.Client c clientset.Interface
nodeName string nodeName string
wg *sync.WaitGroup wg *sync.WaitGroup
containerIDToNameMap map[string]string containerIDToNameMap map[string]string
@ -204,7 +204,7 @@ func getKubemarkMasterComponentsResourceUsage() ResourceUsagePerContainer {
return result return result
} }
func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *client.Client) { func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c clientset.Interface) {
if len(g.workers) == 0 { if len(g.workers) == 0 {
return return
} }
@ -218,7 +218,7 @@ func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *clie
} }
type containerResourceGatherer struct { type containerResourceGatherer struct {
client *client.Client client clientset.Interface
stopCh chan struct{} stopCh chan struct{}
workers []resourceGatherWorker workers []resourceGatherWorker
workerWg sync.WaitGroup workerWg sync.WaitGroup
@ -232,7 +232,7 @@ type ResourceGathererOptions struct {
masterOnly bool masterOnly bool
} }
func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) (*containerResourceGatherer, error) { func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) {
g := containerResourceGatherer{ g := containerResourceGatherer{
client: c, client: c,
stopCh: make(chan struct{}), stopCh: make(chan struct{}),
@ -250,7 +250,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions)
finished: false, finished: false,
}) })
} else { } else {
pods, err := c.Pods("kube-system").List(api.ListOptions{}) pods, err := c.Core().Pods("kube-system").List(api.ListOptions{})
if err != nil { if err != nil {
Logf("Error while listing Pods: %v", err) Logf("Error while listing Pods: %v", err)
return nil, err return nil, err
@ -262,7 +262,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions)
g.containerIDs = append(g.containerIDs, containerID) g.containerIDs = append(g.containerIDs, containerID)
} }
} }
nodeList, err := c.Nodes().List(api.ListOptions{}) nodeList, err := c.Core().Nodes().List(api.ListOptions{})
if err != nil { if err != nil {
Logf("Error while listing Nodes: %v", err) Logf("Error while listing Nodes: %v", err)
return nil, err return nil, err
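The resource-gatherer hunks show the struct-field half of the migration: concrete *client.Client fields become the clientset.Interface abstraction, and each call site gains the Core() hop. An illustrative shape, with hypothetical names:

    type podLister struct {
        client clientset.Interface // was: client *client.Client
    }

    func (p *podLister) kubeSystemPods() (*api.PodList, error) {
        // The resource interface returned by Pods(ns) is unchanged; only
        // the path to it moved behind the Core() group client.
        return p.client.Core().Pods("kube-system").List(api.ListOptions{})
    }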

File diff suppressed because it is too large


@ -100,7 +100,7 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interfac
func gatherMetrics(f *framework.Framework) { func gatherMetrics(f *framework.Framework) {
By("Gathering metrics") By("Gathering metrics")
var summary framework.TestDataSummary var summary framework.TestDataSummary
grabber, err := metrics.NewMetricsGrabber(f.Client, false, false, true, false) grabber, err := metrics.NewMetricsGrabber(f.ClientSet, false, false, true, false)
if err != nil { if err != nil {
framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
} else { } else {


@ -19,12 +19,13 @@ package e2e
import ( import (
"bytes" "bytes"
"fmt" "fmt"
. "github.com/onsi/ginkgo"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
"os/exec" "os/exec"
"path" "path"
"strconv" "strconv"
. "github.com/onsi/ginkgo"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
) )
func addMasterReplica() error { func addMasterReplica() error {
@ -47,13 +48,13 @@ func removeMasterReplica() error {
return nil return nil
} }
func verifyRCs(c *client.Client, ns string, names []string) { func verifyRCs(c clientset.Interface, ns string, names []string) {
for _, name := range names { for _, name := range names {
framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1)) framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1))
} }
} }
func createNewRC(c *client.Client, ns string, name string) { func createNewRC(c clientset.Interface, ns string, name string) {
_, err := newRCByName(c, ns, name, 1) _, err := newRCByName(c, ns, name, 1)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -77,14 +78,14 @@ func verifyNumberOfMasterReplicas(expected int) {
var _ = framework.KubeDescribe("HA-master [Feature:HAMaster]", func() { var _ = framework.KubeDescribe("HA-master [Feature:HAMaster]", func() {
f := framework.NewDefaultFramework("ha-master") f := framework.NewDefaultFramework("ha-master")
var c *client.Client var c clientset.Interface
var ns string var ns string
var additionalReplicas int var additionalReplicas int
var existingRCs []string var existingRCs []string
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
verifyNumberOfMasterReplicas(1) verifyNumberOfMasterReplicas(1)
additionalReplicas = 0 additionalReplicas = 0


@ -191,6 +191,6 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
TargetCPUUtilizationPercentage: &cpu, TargetCPUUtilizationPercentage: &cpu,
}, },
} }
_, errHPA := rc.framework.Client.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) _, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
framework.ExpectNoError(errHPA) framework.ExpectNoError(errHPA)
} }
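Group clients other than Core() hang directly off the clientset, as the HPA creation above shows. A hedged sketch of building and submitting one, assuming the internal autoscaling API types are imported; all names and values are illustrative:

    func createExampleHPA(c clientset.Interface, ns string) error {
        minReplicas := int32(1)
        targetCPU := int32(80)
        hpa := &autoscaling.HorizontalPodAutoscaler{
            ObjectMeta: api.ObjectMeta{Name: "example-hpa"},
            Spec: autoscaling.HorizontalPodAutoscalerSpec{
                ScaleTargetRef: autoscaling.CrossVersionObjectReference{
                    Kind: "ReplicationController",
                    Name: "example-rc",
                },
                MinReplicas:                    &minReplicas,
                MaxReplicas:                    5,
                TargetCPUUtilizationPercentage: &targetCPU,
            },
        }
        // No Core() hop here: Autoscaling() is its own group client.
        _, err := c.Autoscaling().HorizontalPodAutoscalers(ns).Create(hpa)
        return err
    }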


@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7 [Feature:Ingress]", func() {
BeforeEach(func() { BeforeEach(func() {
f.BeforeEach() f.BeforeEach()
jig = newTestJig(f.Client) jig = newTestJig(f.ClientSet)
ns = f.Namespace.Name ns = f.Namespace.Name
}) })


@ -43,7 +43,7 @@ import (
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -68,7 +68,7 @@ const (
) )
type testJig struct { type testJig struct {
client *client.Client client clientset.Interface
rootCAs map[string][]byte rootCAs map[string][]byte
address string address string
ing *extensions.Ingress ing *extensions.Ingress
@ -269,7 +269,7 @@ func buildInsecureClient(timeout time.Duration) *http.Client {
// createSecret creates a secret containing TLS certificates for the given Ingress. // createSecret creates a secret containing TLS certificates for the given Ingress.
// If a secret with the same name already exists in the namespace of the // If a secret with the same name already exists in the namespace of the
// Ingress, it's updated. // Ingress, it's updated.
func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) { func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host string, rootCA, privKey []byte, err error) {
var k, c bytes.Buffer var k, c bytes.Buffer
tls := ing.Spec.TLS[0] tls := ing.Spec.TLS[0]
host = strings.Join(tls.Hosts, ",") host = strings.Join(tls.Hosts, ",")
@ -290,14 +290,14 @@ func createSecret(kubeClient *client.Client, ing *extensions.Ingress) (host stri
}, },
} }
var s *api.Secret var s *api.Secret
if s, err = kubeClient.Secrets(ing.Namespace).Get(tls.SecretName); err == nil { if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName); err == nil {
// TODO: Retry the update. We don't really expect anything to conflict though. // TODO: Retry the update. We don't really expect anything to conflict though.
framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
s.Data = secret.Data s.Data = secret.Data
_, err = kubeClient.Secrets(ing.Namespace).Update(s) _, err = kubeClient.Core().Secrets(ing.Namespace).Update(s)
} else { } else {
framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) framework.Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
_, err = kubeClient.Secrets(ing.Namespace).Create(secret) _, err = kubeClient.Core().Secrets(ing.Namespace).Create(secret)
} }
return host, cert, key, err return host, cert, key, err
} }
@ -684,7 +684,7 @@ func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[stri
} }
framework.Logf("creating " + j.ing.Name + " ingress") framework.Logf("creating " + j.ing.Name + " ingress")
var err error var err error
j.ing, err = j.client.Extensions().Ingress(ns).Create(j.ing) j.ing, err = j.client.Extensions().Ingresses(ns).Create(j.ing)
ExpectNoError(err) ExpectNoError(err)
} }
@ -692,12 +692,12 @@ func (j *testJig) update(update func(ing *extensions.Ingress)) {
var err error var err error
ns, name := j.ing.Namespace, j.ing.Name ns, name := j.ing.Namespace, j.ing.Name
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
j.ing, err = j.client.Extensions().Ingress(ns).Get(name) j.ing, err = j.client.Extensions().Ingresses(ns).Get(name)
if err != nil { if err != nil {
framework.Failf("failed to get ingress %q: %v", name, err) framework.Failf("failed to get ingress %q: %v", name, err)
} }
update(j.ing) update(j.ing)
j.ing, err = j.client.Extensions().Ingress(ns).Update(j.ing) j.ing, err = j.client.Extensions().Ingresses(ns).Update(j.ing)
if err == nil { if err == nil {
describeIng(j.ing.Namespace) describeIng(j.ing.Namespace)
return return
@ -732,7 +732,7 @@ func (j *testJig) getRootCA(secretName string) (rootCA []byte) {
} }
func (j *testJig) deleteIngress() { func (j *testJig) deleteIngress() {
ExpectNoError(j.client.Extensions().Ingress(j.ing.Namespace).Delete(j.ing.Name, nil)) ExpectNoError(j.client.Extensions().Ingresses(j.ing.Namespace).Delete(j.ing.Name, nil))
} }
func (j *testJig) waitForIngress() { func (j *testJig) waitForIngress() {
@ -803,7 +803,7 @@ func ingFromManifest(fileName string) *extensions.Ingress {
func (cont *GCEIngressController) getL7AddonUID() (string, error) { func (cont *GCEIngressController) getL7AddonUID() (string, error) {
framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap) framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap)
cm, err := cont.c.ConfigMaps(api.NamespaceSystem).Get(uidConfigMap) cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -833,11 +833,11 @@ type GCEIngressController struct {
staticIPName string staticIPName string
rc *api.ReplicationController rc *api.ReplicationController
svc *api.Service svc *api.Service
c *client.Client c clientset.Interface
cloud framework.CloudConfig cloud framework.CloudConfig
} }
func newTestJig(c *client.Client) *testJig { func newTestJig(c clientset.Interface) *testJig {
return &testJig{client: c, rootCAs: map[string][]byte{}} return &testJig{client: c, rootCAs: map[string][]byte{}}
} }
@ -846,7 +846,7 @@ type NginxIngressController struct {
ns string ns string
rc *api.ReplicationController rc *api.ReplicationController
pod *api.Pod pod *api.Pod
c *client.Client c clientset.Interface
externalIP string externalIP string
} }
@ -857,14 +857,14 @@ func (cont *NginxIngressController) init() {
framework.Logf("initializing nginx ingress controller") framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns)) framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns))
rc, err := cont.c.ReplicationControllers(cont.ns).Get("nginx-ingress-controller") rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller")
ExpectNoError(err) ExpectNoError(err)
cont.rc = rc cont.rc = rc
framework.Logf("waiting for pods with label %v", rc.Spec.Selector) framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel)) ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
pods, err := cont.c.Pods(cont.ns).List(api.ListOptions{LabelSelector: sel}) pods, err := cont.c.Core().Pods(cont.ns).List(api.ListOptions{LabelSelector: sel})
ExpectNoError(err) ExpectNoError(err)
if len(pods.Items) == 0 { if len(pods.Items) == 0 {
framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
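Besides the client type, this file picks up a generated-name change: the extensions group client exposes Ingresses(ns), plural, where the unversioned client had Ingress(ns). The j.update retry loop condenses to something like this hypothetical helper:

    // updateIngress is an illustrative distillation of j.update above.
    func updateIngress(c clientset.Interface, ns, name string, mutate func(*extensions.Ingress)) error {
        var lastErr error
        for i := 0; i < 3; i++ {
            ing, err := c.Extensions().Ingresses(ns).Get(name)
            if err != nil {
                return err
            }
            mutate(ing)
            if _, lastErr = c.Extensions().Ingresses(ns).Update(ing); lastErr == nil {
                return nil
            }
        }
        return lastErr
    }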


@ -65,8 +65,8 @@ func runPod(f *framework.Framework, name, image string) *api.Pod {
}, },
}, },
} }
createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod) createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, createdPod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, createdPod))
return createdPod return createdPod
} }
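The runPod hunk is the create-then-wait idiom after migration; note that the framework wait helpers now take clientset.Interface as well. A condensed, hypothetical variant:

    // runSleepingPod creates a pod through the clientset and blocks until
    // it is Running. Image and command are illustrative.
    func runSleepingPod(f *framework.Framework, name, image string) *api.Pod {
        pod := &api.Pod{
            ObjectMeta: api.ObjectMeta{Name: name},
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {Name: name, Image: image, Command: []string{"sleep", "3600"}},
                },
            },
        }
        created, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
        framework.ExpectNoError(err)
        framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, created))
        return created
    }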


@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -50,11 +50,11 @@ var _ = framework.KubeDescribe("Job", func() {
It("should run a job to completion when tasks succeed", func() { It("should run a job to completion when tasks succeed", func() {
By("Creating a job") By("Creating a job")
job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions) job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions") By("Ensuring job reaches completions")
err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Job", func() {
// due to successive failures too likely with a reasonable // due to successive failures too likely with a reasonable
// test timeout. // test timeout.
job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions) job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions") By("Ensuring job reaches completions")
err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -87,23 +87,23 @@ var _ = framework.KubeDescribe("Job", func() {
// run due to some slowness, 1 in 2^15 chance of happening, // run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare. // causing test flake. Should be very rare.
job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions) job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions") By("Ensuring job reaches completions")
err = waitForJobFinish(f.Client, f.Namespace.Name, job.Name, completions) err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
It("should keep restarting failed pods", func() { It("should keep restarting failed pods", func() {
By("Creating a job") By("Creating a job")
job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions) job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job shows many failures") By("Ensuring job shows many failures")
err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := getJob(f.Client, f.Namespace.Name, job.Name) curr, err := getJob(f.ClientSet, f.Namespace.Name, job.Name)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -116,11 +116,11 @@ var _ = framework.KubeDescribe("Job", func() {
endParallelism := int32(2) endParallelism := int32(2)
By("Creating a job") By("Creating a job")
job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions) job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == startParallelism") By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scale job up") By("scale job up")
@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == endParallelism") By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -141,11 +141,11 @@ var _ = framework.KubeDescribe("Job", func() {
endParallelism := int32(1) endParallelism := int32(1)
By("Creating a job") By("Creating a job")
job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions) job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == startParallelism") By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism) err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scale job down") By("scale job down")
@ -157,18 +157,18 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == endParallelism") By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism) err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
It("should delete a job", func() { It("should delete a job", func() {
By("Creating a job") By("Creating a job")
job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == parallelism") By("Ensuring active pods == parallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism) err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("delete a job") By("delete a job")
@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job was deleted") By("Ensuring job was deleted")
_, err = getJob(f.Client, f.Namespace.Name, job.Name) _, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue()) Expect(errors.IsNotFound(err)).To(BeTrue())
}) })
@ -189,21 +189,21 @@ var _ = framework.KubeDescribe("Job", func() {
job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions) job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
activeDeadlineSeconds := int64(10) activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
job, err := createJob(f.Client, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring job was failed") By("Ensuring job was failed")
err = waitForJobFail(f.Client, f.Namespace.Name, job.Name, 20*time.Second) err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second)
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
job, err = getJob(f.Client, f.Namespace.Name, job.Name) job, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// the job stabilized and won't be synced until modification or full // the job stabilized and won't be synced until modification or full
// resync happens, we don't want to wait for the latter so we force // resync happens, we don't want to wait for the latter so we force
// sync modifying it // sync modifying it
job.Spec.Parallelism = &completions job.Spec.Parallelism = &completions
job, err = updateJob(f.Client, f.Namespace.Name, job) job, err = updateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = waitForJobFail(f.Client, f.Namespace.Name, job.Name, jobTimeout) err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, jobTimeout)
} }
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -218,7 +218,7 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Parallelism: &parallelism, Parallelism: &parallelism,
Completions: &completions, Completions: &completions,
ManualSelector: newBool(true), ManualSelector: newBool(false),
Template: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: map[string]string{jobSelectorKey: name}, Labels: map[string]string{jobSelectorKey: name},
@ -272,28 +272,28 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp
return job return job
} }
func getJob(c *client.Client, ns, name string) (*batch.Job, error) { func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
return c.Extensions().Jobs(ns).Get(name) return c.Batch().Jobs(ns).Get(name)
} }
func createJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { func createJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Extensions().Jobs(ns).Create(job) return c.Batch().Jobs(ns).Create(job)
} }
func updateJob(c *client.Client, ns string, job *batch.Job) (*batch.Job, error) { func updateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Extensions().Jobs(ns).Update(job) return c.Batch().Jobs(ns).Update(job)
} }
func deleteJob(c *client.Client, ns, name string) error { func deleteJob(c clientset.Interface, ns, name string) error {
return c.Extensions().Jobs(ns).Delete(name, nil) return c.Batch().Jobs(ns).Delete(name, nil)
} }
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy. // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int32) error { func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName})) label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
pods, err := c.Pods(ns).List(options) pods, err := c.Core().Pods(ns).List(options)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -308,9 +308,9 @@ func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int
} }
// Wait for job to reach completions. // Wait for job to reach completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) error { func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := c.Extensions().Jobs(ns).Get(jobName) curr, err := c.Batch().Jobs(ns).Get(jobName)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -319,9 +319,9 @@ func waitForJobFinish(c *client.Client, ns, jobName string, completions int32) e
} }
// Wait for the job to fail. // Wait for the job to fail.
func waitForJobFail(c *client.Client, ns, jobName string, timeout time.Duration) error { func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) { return wait.Poll(framework.Poll, timeout, func() (bool, error) {
curr, err := c.Extensions().Jobs(ns).Get(jobName) curr, err := c.Batch().Jobs(ns).Get(jobName)
if err != nil { if err != nil {
return false, err return false, err
} }
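Job objects move groups entirely in these helpers: reads and writes go through Batch().Jobs rather than Extensions().Jobs. A hypothetical round-trip under the new accessor, assuming the batch and errors imports from this file:

    // recreateJob deletes any existing job of the same name, then creates
    // the given one; illustrative only.
    func recreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
        if err := c.Batch().Jobs(ns).Delete(job.Name, nil); err != nil && !errors.IsNotFound(err) {
            return nil, err
        }
        return c.Batch().Jobs(ns).Create(job)
    }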


@ -54,7 +54,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// Check for the existence of the Kibana service. // Check for the existence of the Kibana service.
By("Checking the Kibana service exists.") By("Checking the Kibana service exists.")
s := f.Client.Services(api.NamespaceSystem) s := f.ClientSet.Core().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against // Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created. // being run as the first e2e test just after the e2e cluster has been created.
var err error var err error
@ -70,17 +70,17 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
By("Checking to make sure the Kibana pods are running") By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue})) label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
pods, err := f.Client.Pods(api.NamespaceSystem).List(options) pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.Client, &pod) err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
By("Checking to make sure we get a response from the Kibana UI.") By("Checking to make sure we get a response from the Kibana UI.")
err = nil err = nil
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil { if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue continue
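Requests that used to start from the client itself (c.Get()) now start from the generated REST client. A hypothetical probe built from the same calls as the loop above:

    // pingKibana proxies one request to the kibana-logging service and
    // reports whether the UI answered; the helper name is illustrative.
    func pingKibana(f *framework.Framework) error {
        proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
        if errProxy != nil {
            return errProxy
        }
        _, err := proxyRequest.Namespace(api.NamespaceSystem).
            Name("kibana-logging").
            DoRaw()
        return err
    }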


@ -46,7 +46,7 @@ import (
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@ -184,10 +184,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
forEachPod := func(podFunc func(p api.Pod)) { forEachPod := func(podFunc func(p api.Pod)) {
clusterState().ForEach(podFunc) clusterState().ForEach(podFunc)
} }
var c *client.Client var c clientset.Interface
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
@ -260,7 +260,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
} }
It("should create and stop a working application [Conformance]", func() { It("should create and stop a working application [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c) framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
defer forEachGBFile(func(contents string) { defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns) cleanupKubectlInputs(contents, ns)
@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
WithStdinData("abcd1234"). WithStdinData("abcd1234").
Exec() Exec()
ExpectNoError(err) ExpectNoError(err)
framework.WaitForPodToDisappear(f.Client, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
By("running a failing command with --leave-stdin-open") By("running a failing command with --leave-stdin-open")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
@ -404,7 +404,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
It("should support inline execution and attach", func() { It("should support inline execution and attach", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #23335 framework.SkipIfContainerRuntimeIs("rkt") // #23335
framework.SkipUnlessServerVersionGTE(jobsVersion, c) framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
nsFlag := fmt.Sprintf("--namespace=%v", ns) nsFlag := fmt.Sprintf("--namespace=%v", ns)
@ -414,7 +414,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
ExecOrDie() ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed")) Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Extensions().Jobs(ns).Delete("run-test", nil)).To(BeNil()) Expect(c.Batch().Jobs(ns).Delete("run-test", nil)).To(BeNil())
By("executing a command with run and attach without stdin") By("executing a command with run and attach without stdin")
runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
@ -422,7 +422,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
ExecOrDie() ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("abcd1234")) Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed")) Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.Extensions().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) Expect(c.Batch().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())
By("executing a command with run and attach with stdin with open stdin should remain running") By("executing a command with run and attach with stdin with open stdin should remain running")
runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
@ -453,7 +453,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
} }
Expect(err).To(BeNil()) Expect(err).To(BeNil())
Expect(c.Extensions().Jobs(ns).Delete("run-test-3", nil)).To(BeNil()) Expect(c.Batch().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
}) })
It("should support port-forward", func() { It("should support port-forward", func() {
@ -541,10 +541,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.KubeDescribe("Kubectl describe", func() { framework.KubeDescribe("Kubectl describe", func() {
It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() { It("should check if kubectl describe prints relevant information for rc and pods [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c) framework.SkipUnlessServerVersionGTE(nodePortsOptionalVersion, c.Discovery())
kv, err := framework.KubectlVersion() kv, err := framework.KubectlVersion()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.SkipUnlessServerVersionGTE(kv, c) framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
controllerJson := readTestFileOrDie(redisControllerFilename) controllerJson := readTestFileOrDie(redisControllerFilename)
serviceJson := readTestFileOrDie(redisServiceFilename) serviceJson := readTestFileOrDie(redisServiceFilename)
@ -610,7 +610,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
// Node // Node
// It should be OK to list unschedulable Nodes here. // It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0] node := nodes.Items[0]
output = framework.RunKubectlOrDie("describe", "node", node.Name) output = framework.RunKubectlOrDie("describe", "node", node.Name)
@ -664,7 +664,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
validateService := func(name string, servicePort int, timeout time.Duration) { validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
endpoints, err := c.Endpoints(ns).Get(name) endpoints, err := c.Core().Endpoints(ns).Get(name)
if err != nil { if err != nil {
// log the real error // log the real error
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
@ -695,7 +695,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
service, err := c.Services(ns).Get(name) service, err := c.Core().Services(ns).Get(name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(service.Spec.Ports) != 1 { if len(service.Spec.Ports) != 1 {
@ -773,7 +773,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
It("should be able to retrieve and filter logs [Conformance]", func() { It("should be able to retrieve and filter logs [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c) framework.SkipUnlessServerVersionGTE(extendedPodLogFilterVersion, c.Discovery())
// Split("something\n", "\n") returns ["something", ""], so // Split("something\n", "\n") returns ["something", ""], so
// strip trailing newline first // strip trailing newline first
@ -873,7 +873,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
BeforeEach(func() { BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns) nsFlag = fmt.Sprintf("--namespace=%v", ns)
gte, err := framework.ServerVersionGTE(deploymentsVersion, c) gte, err := framework.ServerVersionGTE(deploymentsVersion, c.Discovery())
if err != nil { if err != nil {
framework.Failf("Failed to get server version: %v", err) framework.Failf("Failed to get server version: %v", err)
} }
@ -924,7 +924,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
By("verifying the rc " + rcName + " was created") By("verifying the rc " + rcName + " was created")
rc, err := c.ReplicationControllers(ns).Get(rcName) rc, err := c.Core().ReplicationControllers(ns).Get(rcName)
if err != nil { if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err) framework.Failf("Failed getting rc %s: %v", rcName, err)
} }
@ -964,10 +964,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.KubeDescribe("Kubectl rolling-update", func() { framework.KubeDescribe("Kubectl rolling-update", func() {
var nsFlag string var nsFlag string
var rcName string var rcName string
var c *client.Client var c clientset.Interface
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
nsFlag = fmt.Sprintf("--namespace=%v", ns) nsFlag = fmt.Sprintf("--namespace=%v", ns)
rcName = "e2e-test-nginx-rc" rcName = "e2e-test-nginx-rc"
}) })
@ -980,7 +980,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
By("verifying the rc " + rcName + " was created") By("verifying the rc " + rcName + " was created")
rc, err := c.ReplicationControllers(ns).Get(rcName) rc, err := c.Core().ReplicationControllers(ns).Get(rcName)
if err != nil { if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err) framework.Failf("Failed getting rc %s: %v", rcName, err)
} }
@ -1021,7 +1021,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
It("should create a deployment from an image [Conformance]", func() { It("should create a deployment from an image [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c) framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
@ -1063,12 +1063,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
It("should create a job from an image when restart is OnFailure [Conformance]", func() { It("should create a job from an image when restart is OnFailure [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c) framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag) framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created") By("verifying the job " + jobName + " was created")
job, err := c.Extensions().Jobs(ns).Get(jobName) job, err := c.Batch().Jobs(ns).Get(jobName)
if err != nil { if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err) framework.Failf("Failed getting job %s: %v", jobName, err)
} }
@ -1133,12 +1133,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
It("should create a pod from an image when restart is Never [Conformance]", func() { It("should create a pod from an image when restart is Never [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c) framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag) framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag)
By("verifying the pod " + podName + " was created") By("verifying the pod " + podName + " was created")
pod, err := c.Pods(ns).Get(podName) pod, err := c.Core().Pods(ns).Get(podName)
if err != nil { if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err) framework.Failf("Failed getting pod %s: %v", podName, err)
} }
@ -1166,7 +1166,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}) })
It("should update a single-container pod's image [Conformance]", func() { It("should update a single-container pod's image [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(jobsVersion, c) framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("running the image " + nginxImage) By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag) framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag)
@ -1189,7 +1189,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag) framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag)
By("verifying the pod " + podName + " has the right image " + busyboxImage) By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.Pods(ns).Get(podName) pod, err := c.Core().Pods(ns).Get(podName)
if err != nil { if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err) framework.Failf("Failed getting deployment %s: %v", podName, err)
} }
@ -1208,7 +1208,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
// The rkt runtime doesn't support attach, see #23335 // The rkt runtime doesn't support attach, see #23335
framework.SkipIfContainerRuntimeIs("rkt") framework.SkipIfContainerRuntimeIs("rkt")
framework.SkipUnlessServerVersionGTE(jobsVersion, c) framework.SkipUnlessServerVersionGTE(jobsVersion, c.Discovery())
By("executing a command with run --rm and attach with stdin") By("executing a command with run --rm and attach with stdin")
t := time.NewTimer(runJobTimeout) t := time.NewTimer(runJobTimeout)
@ -1221,7 +1221,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
Expect(runOutput).To(ContainSubstring("stdin closed")) Expect(runOutput).To(ContainSubstring("stdin closed"))
By("verifying the job " + jobName + " was deleted") By("verifying the job " + jobName + " was deleted")
_, err := c.Extensions().Jobs(ns).Get(jobName) _, err := c.Batch().Jobs(ns).Get(jobName)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(apierrs.IsNotFound(err)).To(BeTrue()) Expect(apierrs.IsNotFound(err)).To(BeTrue())
}) })
@ -1286,7 +1286,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
Effect: api.TaintEffectNoSchedule, Effect: api.TaintEffectNoSchedule,
} }
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0] node := nodes.Items[0]
nodeName := node.Name nodeName := node.Name
@ -1318,7 +1318,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
Effect: api.TaintEffectNoSchedule, Effect: api.TaintEffectNoSchedule,
} }
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0] node := nodes.Items[0]
nodeName := node.Name nodeName := node.Name
@ -1370,7 +1370,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag)
By("verifying that the quota was created") By("verifying that the quota was created")
quota, err := c.ResourceQuotas(ns).Get(quotaName) quota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
if err != nil { if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err) framework.Failf("Failed getting quota %s: %v", quotaName, err)
} }
@ -1400,7 +1400,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag)
By("verifying that the quota was created") By("verifying that the quota was created")
quota, err := c.ResourceQuotas(ns).Get(quotaName) quota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
if err != nil { if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err) framework.Failf("Failed getting quota %s: %v", quotaName, err)
} }
@ -1517,7 +1517,7 @@ func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{})) return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
} }
func validateGuestbookApp(c *client.Client, ns string) { func validateGuestbookApp(c clientset.Interface, ns string) {
framework.Logf("Waiting for all frontend pods to be Running.") framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
@ -1539,7 +1539,7 @@ func validateGuestbookApp(c *client.Client, ns string) {
} }
// Returns whether received expected response from guestbook on time. // Returns whether received expected response from guestbook on time.
func waitForGuestbookResponse(c *client.Client, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool { func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
res, err := makeRequestToGuestbook(c, cmd, arg, ns) res, err := makeRequestToGuestbook(c, cmd, arg, ns)
if err == nil && res == expectedResponse { if err == nil && res == expectedResponse {
@ -1550,8 +1550,8 @@ func waitForGuestbookResponse(c *client.Client, cmd, arg, expectedResponse strin
return false return false
} }
func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (string, error) { func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Get()) proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil { if errProxy != nil {
return "", errProxy return "", errProxy
} }
@ -1609,13 +1609,13 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
return bytes.NewReader(data) return bytes.NewReader(data)
} }
func forEachReplicationController(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) { func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) {
var rcs *api.ReplicationControllerList var rcs *api.ReplicationControllerList
var err error var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
rcs, err = c.ReplicationControllers(ns).List(options) rcs, err = c.Core().ReplicationControllers(ns).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(rcs.Items) > 0 { if len(rcs.Items) > 0 {
break break
@ -1646,18 +1646,18 @@ func validateReplicationControllerConfiguration(rc api.ReplicationController) {
// getUDData creates a validator function based on the input string (i.e. kitten.jpg). // getUDData creates a validator function based on the input string (i.e. kitten.jpg).
// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg // For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field. // in the container's json field.
func getUDData(jpgExpected string, ns string) func(*client.Client, string) error { func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
// getUDData validates data.json in the update-demo (returns nil if data is ok). // getUDData validates data.json in the update-demo (returns nil if data is ok).
return func(c *client.Client, podID string) error { return func(c clientset.Interface, podID string) error {
framework.Logf("validating pod %s", podID) framework.Logf("validating pod %s", podID)
subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
if err != nil { if err != nil {
return err return err
} }
var body []byte var body []byte
if subResourceProxyAvailable { if subResourceProxyAvailable {
body, err = c.Get(). body, err = c.Core().RESTClient().Get().
Namespace(ns). Namespace(ns).
Resource("pods"). Resource("pods").
SubResource("proxy"). SubResource("proxy").
@ -1666,7 +1666,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
Do(). Do().
Raw() Raw()
} else { } else {
body, err = c.Get(). body, err = c.Core().RESTClient().Get().
Prefix("proxy"). Prefix("proxy").
Namespace(ns). Namespace(ns).
Resource("pods"). Resource("pods").
@ -1692,7 +1692,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
} }
} }
func noOpValidatorFn(c *client.Client, podID string) error { return nil } func noOpValidatorFn(c clientset.Interface, podID string) error { return nil }
// newBlockingReader returns a reader that allows reading the given string, // newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer. // then blocks until Close() is called on the returned closer.
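All of the version gates in this file switch from passing the whole client to passing c.Discovery(), since only the discovery client is needed to answer server-version queries. The call-site shape, using this file's deploymentsVersion; the Skipf message is illustrative:

    gte, err := framework.ServerVersionGTE(deploymentsVersion, c.Discovery())
    if err != nil {
        framework.Failf("Failed to get server version: %v", err)
    }
    if !gte {
        // Any clientset can supply the discovery client, so version
        // checks no longer need the concrete *client.Client.
        framework.Skipf("server version is below %v", deploymentsVersion)
    }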


@ -22,7 +22,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -44,7 +44,7 @@ const (
// getPodMatches returns a set of pod names on the given node that matches the // getPodMatches returns a set of pod names on the given node that matches the
// podNamePrefix and namespace. // podNamePrefix and namespace.
func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String { func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
matches := sets.NewString() matches := sets.NewString()
framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName) framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
runningPods, err := framework.GetKubeletPods(c, nodeName) runningPods, err := framework.GetKubeletPods(c, nodeName)
@ -68,7 +68,7 @@ func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, name
// information; they are reconstructed by examining the container runtime. In // information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so // the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods. // podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
return wait.Poll(pollInterval, timeout, func() (bool, error) { return wait.Poll(pollInterval, timeout, func() (bool, error) {
matchCh := make(chan sets.String, len(nodeNames)) matchCh := make(chan sets.String, len(nodeNames))
for _, item := range nodeNames.List() { for _, item := range nodeNames.List() {
@ -95,13 +95,13 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNam
// In case a given label already exists, it overwrites it. If label to remove doesn't exist // In case a given label already exists, it overwrites it. If label to remove doesn't exist
// it silently ignores it. // it silently ignores it.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode // TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove map[string]string) { func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
const maxRetries = 5 const maxRetries = 5
for nodeName := range nodeNames { for nodeName := range nodeNames {
var node *api.Node var node *api.Node
var err error var err error
for i := 0; i < maxRetries; i++ { for i := 0; i < maxRetries; i++ {
node, err = c.Nodes().Get(nodeName) node, err = c.Core().Nodes().Get(nodeName)
if err != nil { if err != nil {
framework.Logf("Error getting node %s: %v", nodeName, err) framework.Logf("Error getting node %s: %v", nodeName, err)
continue continue
@ -116,7 +116,7 @@ func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove m
delete(node.ObjectMeta.Labels, k) delete(node.ObjectMeta.Labels, k)
} }
} }
_, err = c.Nodes().Update(node) _, err = c.Core().Nodes().Update(node)
if err != nil { if err != nil {
framework.Logf("Error updating node %s: %v", nodeName, err) framework.Logf("Error updating node %s: %v", nodeName, err)
} else { } else {
@ -128,7 +128,7 @@ func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove m
} }
var _ = framework.KubeDescribe("kubelet", func() { var _ = framework.KubeDescribe("kubelet", func() {
var c *client.Client var c clientset.Interface
var numNodes int var numNodes int
var nodeNames sets.String var nodeNames sets.String
var nodeLabels map[string]string var nodeLabels map[string]string
@ -136,8 +136,8 @@ var _ = framework.KubeDescribe("kubelet", func() {
var resourceMonitor *framework.ResourceMonitor var resourceMonitor *framework.ResourceMonitor
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(c)
numNodes = len(nodes.Items) numNodes = len(nodes.Items)
nodeNames = sets.NewString() nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them // If there are a lot of nodes, we don't want to use all of them
@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
// Start resourceMonitor only in small clusters. // Start resourceMonitor only in small clusters.
if len(nodes.Items) <= maxNodesToCheck { if len(nodes.Items) <= maxNodesToCheck {
resourceMonitor = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingInterval) resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval)
resourceMonitor.Start() resourceMonitor.Start()
} }
}) })
@ -188,10 +188,10 @@ var _ = framework.KubeDescribe("kubelet", func() {
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.Client, Client: f.ClientSet,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
Replicas: totalPods, Replicas: totalPods,
NodeSelector: nodeLabels, NodeSelector: nodeLabels,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
@ -199,14 +199,14 @@ var _ = framework.KubeDescribe("kubelet", func() {
// running on the nodes according to kubelet. The timeout is set to // running on the nodes according to kubelet. The timeout is set to
// only 30 seconds here because framework.RunRC already waited for all pods to // only 30 seconds here because framework.RunRC already waited for all pods to
// transition to the running status. // transition to the running status.
Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods, Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, totalPods,
time.Second*30)).NotTo(HaveOccurred()) time.Second*30)).NotTo(HaveOccurred())
if resourceMonitor != nil { if resourceMonitor != nil {
resourceMonitor.LogLatest() resourceMonitor.LogLatest()
} }
By("Deleting the RC") By("Deleting the RC")
framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName) framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the // Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its // node. The /runningpods handler checks the container runtime (or its
// cache) and returns a list of running pods. Some possible causes of // cache) and returns a list of running pods. Some possible causes of
@ -215,7 +215,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
// - a bug in graceful termination (if it is enabled) // - a bug in graceful termination (if it is enabled)
// - docker slow to delete pods (or resource problems causing slowness) // - docker slow to delete pods (or resource problems causing slowness)
start := time.Now() start := time.Now()
Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0, Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, f.Namespace.Name, 0,
itArg.timeout)).NotTo(HaveOccurred()) itArg.timeout)).NotTo(HaveOccurred())
framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames), framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start)) time.Since(start))
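The updateNodeLabels change above follows the shape seen throughout this commit: every flat call on *client.Client gains a Core() hop. A minimal sketch of the read-modify-update loop under that assumption; addNodeLabel is a hypothetical helper, not part of the commit:

package e2esketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// addNodeLabel fetches a node through the clientset, mutates its labels,
// and writes it back, retrying a few times on transient failures.
func addNodeLabel(c clientset.Interface, nodeName, key, value string) error {
	const maxRetries = 5
	var lastErr error
	for i := 0; i < maxRetries; i++ {
		node, err := c.Core().Nodes().Get(nodeName)
		if err != nil {
			lastErr = err
			continue
		}
		if node.ObjectMeta.Labels == nil {
			node.ObjectMeta.Labels = map[string]string{}
		}
		node.ObjectMeta.Labels[key] = value
		if _, err = c.Core().Nodes().Update(node); err != nil {
			lastErr = err
			continue
		}
		return nil
	}
	return lastErr
}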

View File

@ -22,7 +22,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -50,7 +50,7 @@ type resourceTest struct {
memLimits framework.ResourceUsagePerContainer memLimits framework.ResourceUsagePerContainer
} }
func logPodsOnNodes(c *client.Client, nodeNames []string) { func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
for _, n := range nodeNames { for _, n := range nodeNames {
podList, err := framework.GetKubeletRunningPods(c, n) podList, err := framework.GetKubeletRunningPods(c, n)
if err != nil { if err != nil {
@ -70,10 +70,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
// TODO: Use a more realistic workload // TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.Client, Client: f.ClientSet,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
Replicas: totalPods, Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
@ -96,18 +96,18 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
} else { } else {
time.Sleep(reportingPeriod) time.Sleep(reportingPeriod)
} }
logPodsOnNodes(f.Client, nodeNames.List()) logPodsOnNodes(f.ClientSet, nodeNames.List())
} }
By("Reporting overall resource usage") By("Reporting overall resource usage")
logPodsOnNodes(f.Client, nodeNames.List()) logPodsOnNodes(f.ClientSet, nodeNames.List())
usageSummary, err := rm.GetLatest() usageSummary, err := rm.GetLatest()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// TODO(random-liu): Remove the original log when we migrate to new perfdash // TODO(random-liu): Remove the original log when we migrate to new perfdash
framework.Logf("%s", rm.FormatResourceUsage(usageSummary)) framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
// Log perf result // Log perf result
framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary))) framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
verifyMemoryLimits(f.Client, expectedMemory, usageSummary) verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
cpuSummary := rm.GetCPUSummary() cpuSummary := rm.GetCPUSummary()
framework.Logf("%s", rm.FormatCPUSummary(cpuSummary)) framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
@ -116,10 +116,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
verifyCPULimits(expectedCPU, cpuSummary) verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC") By("Deleting the RC")
framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName) framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
} }
func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
if expected == nil { if expected == nil {
return return
} }
@ -200,16 +200,16 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
// Wait until image prepull pod has completed so that they wouldn't // Wait until image prepull pod has completed so that they wouldn't
// affect the runtime cpu usage. Fail the test if prepulling cannot // affect the runtime cpu usage. Fail the test if prepulling cannot
// finish in time. // finish in time.
if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil { if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout) framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
} }
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeNames = sets.NewString() nodeNames = sets.NewString()
for _, node := range nodes.Items { for _, node := range nodes.Items {
nodeNames.Insert(node.Name) nodeNames.Insert(node.Name)
} }
om = framework.NewRuntimeOperationMonitor(f.Client) om = framework.NewRuntimeOperationMonitor(f.ClientSet)
rm = framework.NewResourceMonitor(f.Client, framework.TargetContainers(), containerStatsPollingPeriod) rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
rm.Start() rm.Start()
}) })
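RCConfig.Client and the framework helpers now take the clientset directly, so f.Client drops out of these tests entirely. A hedged sketch of the resulting create-and-clean-up flow; runPauseRC is hypothetical, and the test/utils import path for testutils is assumed from the era's tree layout:

package e2esketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"
)

// runPauseRC starts a pause-image RC and tears it down again, passing the
// clientset everywhere the old *client.Client used to go.
func runPauseRC(c clientset.Interface, ns, name string, replicas int) error {
	if err := framework.RunRC(testutils.RCConfig{
		Client:    c,
		Name:      name,
		Namespace: ns,
		Image:     framework.GetPauseImageName(c),
		Replicas:  replicas,
	}); err != nil {
		return err
	}
	return framework.DeleteRCAndPods(c, ns, name)
}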

View File

@ -42,11 +42,11 @@ var _ = framework.KubeDescribe("LimitRange", func() {
min, max, min, max,
defaultLimit, defaultRequest, defaultLimit, defaultRequest,
maxLimitRequestRatio) maxLimitRequestRatio)
limitRange, err := f.Client.LimitRanges(f.Namespace.Name).Create(limitRange) limitRange, err := f.ClientSet.Core().LimitRanges(f.Namespace.Name).Create(limitRange)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Fetching the LimitRange to ensure it has proper values") By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.Client.LimitRanges(f.Namespace.Name).Get(limitRange.Name) limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name)
expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual) err = equalResourceRequirement(expected, actual)
@ -54,11 +54,11 @@ var _ = framework.KubeDescribe("LimitRange", func() {
By("Creating a Pod with no resource requirements") By("Creating a Pod with no resource requirements")
pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{}) pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring Pod has resource requirements applied from LimitRange") By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for i := range pod.Spec.Containers { for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
@ -71,11 +71,11 @@ var _ = framework.KubeDescribe("LimitRange", func() {
By("Creating a Pod with partial resource requirements") By("Creating a Pod with partial resource requirements")
pod = newTestPod(f, "pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", "")) pod = newTestPod(f, "pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring Pod has merged resource requirements applied from LimitRange") By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// This is an interesting case, so it's worth a comment // This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request // If you specify a Limit, and no Request, the Limit will default to the Request
@ -92,12 +92,12 @@ var _ = framework.KubeDescribe("LimitRange", func() {
By("Failing to create a Pod with less than min resources") By("Failing to create a Pod with less than min resources")
pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources") By("Failing to create a Pod with more than max resources")
pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
@ -176,7 +176,7 @@ func newTestPod(f *framework.Framework, name string, requests api.ResourceList,
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "pause", Name: "pause",
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
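The LimitRange calls show the generic pattern for namespaced resources: the typed client comes from Core(), and Create/Get keep their shapes. A minimal sketch; createAndFetchLimitRange is a hypothetical helper:

package e2esketch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// createAndFetchLimitRange creates a LimitRange and reads it back through
// the Core() group accessor that replaces the old flat client methods.
func createAndFetchLimitRange(c clientset.Interface, ns string, lr *api.LimitRange) (*api.LimitRange, error) {
	created, err := c.Core().LimitRanges(ns).Create(lr)
	if err != nil {
		return nil, err
	}
	return c.Core().LimitRanges(ns).Get(created.Name)
}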

View File

@ -29,10 +29,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport" "k8s.io/kubernetes/pkg/client/transport"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
utilnet "k8s.io/kubernetes/pkg/util/net" utilnet "k8s.io/kubernetes/pkg/util/net"
@ -64,7 +62,7 @@ const (
// To run this suite you must explicitly ask for it by setting the // To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag. // -t/--test flag or ginkgo.focus flag.
var _ = framework.KubeDescribe("Load capacity", func() { var _ = framework.KubeDescribe("Load capacity", func() {
var c *client.Client var clientset internalclientset.Interface
var nodeCount int var nodeCount int
var ns string var ns string
var configs []*testutils.RCConfig var configs []*testutils.RCConfig
@ -74,7 +72,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
// TODO add flag that allows to skip cleanup on failure // TODO add flag that allows to skip cleanup on failure
AfterEach(func() { AfterEach(func() {
// Verify latency metrics // Verify latency metrics
highLatencyRequests, err := framework.HighLatencyRequests(c) highLatencyRequests, err := framework.HighLatencyRequests(clientset)
framework.ExpectNoError(err, "Too many instances metrics above the threshold") framework.ExpectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
}) })
@ -99,25 +97,25 @@ var _ = framework.KubeDescribe("Load capacity", func() {
f.NamespaceDeletionTimeout = time.Hour f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() { BeforeEach(func() {
c = f.Client clientset = f.ClientSet
// In large clusters we may get to this point but still have a bunch // In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node // of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable. // unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(clientset))
ns = f.Namespace.Name ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(clientset)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which // Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all // generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test. // terminating namespace to be finally deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns) err := framework.CheckTestingNSDeletedExcept(clientset, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.ResetMetrics(c)) framework.ExpectNoError(framework.ResetMetrics(clientset))
}) })
type Load struct { type Load struct {
@ -153,7 +151,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
framework.Logf("Creating services") framework.Logf("Creating services")
services := generateServicesForConfigs(configs) services := generateServicesForConfigs(configs)
for _, service := range services { for _, service := range services {
_, err := c.Services(service.Namespace).Create(service) _, err := clientset.Core().Services(service.Namespace).Create(service)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
framework.Logf("%v Services created.", len(services)) framework.Logf("%v Services created.", len(services))
@ -203,7 +201,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
if createServices == "true" { if createServices == "true" {
framework.Logf("Starting to delete services...") framework.Logf("Starting to delete services...")
for _, service := range services { for _, service := range services {
err := c.Services(ns).Delete(service.Name) err := clientset.Core().Services(ns).Delete(service.Name, nil)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
framework.Logf("Services deleted") framework.Logf("Services deleted")
@ -223,8 +221,8 @@ func createNamespaces(f *framework.Framework, nodeCount, podsPerNode int) []*api
return namespaces return namespaces
} }
func createClients(numberOfClients int) ([]*client.Client, error) { func createClients(numberOfClients int) ([]*internalclientset.Clientset, error) {
clients := make([]*client.Client, numberOfClients) clients := make([]*internalclientset.Clientset, numberOfClients)
for i := 0; i < numberOfClients; i++ { for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -260,7 +258,7 @@ func createClients(numberOfClients int) ([]*client.Client, error) {
// Transport field. // Transport field.
config.TLSClientConfig = restclient.TLSClientConfig{} config.TLSClientConfig = restclient.TLSClientConfig{}
c, err := client.New(config) c, err := internalclientset.NewForConfig(config)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -385,14 +383,14 @@ func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Du
sleepUpTo(scalingTime) sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2) newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
framework.ExpectNoError(framework.ScaleRC(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name, newSize, true), framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
fmt.Sprintf("scaling rc %s for the first time", config.Name)) fmt.Sprintf("scaling rc %s for the first time", config.Name))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
options := api.ListOptions{ options := api.ListOptions{
LabelSelector: selector, LabelSelector: selector,
ResourceVersion: "0", ResourceVersion: "0",
} }
_, err := config.Client.Pods(config.Namespace).List(options) _, err := config.Client.Core().Pods(config.Namespace).List(options)
framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name)) framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
} }
@ -413,17 +411,6 @@ func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.
if framework.TestContext.GarbageCollectorEnabled { if framework.TestContext.GarbageCollectorEnabled {
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} else { } else {
framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} }
} }
// coreClientSetFromUnversioned adapts just enough of an unversioned.Client to work with the scale RC function
func coreClientSetFromUnversioned(c *client.Client) internalclientset.Interface {
var clientset internalclientset.Clientset
if c != nil {
clientset.CoreClient = unversionedcore.New(c.RESTClient)
} else {
clientset.CoreClient = unversionedcore.New(nil)
}
return &clientset
}
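The deleted coreClientSetFromUnversioned adapter existed only to wrap an unversioned client in a Clientset shell; building the clientset from the rest config makes it redundant. A minimal sketch under that assumption:

package e2esketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newClientset constructs a generated clientset straight from a rest
// config; the result already satisfies internalclientset.Interface, so no
// adapter around a *client.Client is needed.
func newClientset(config *restclient.Config) (*clientset.Clientset, error) {
	return clientset.NewForConfig(config)
}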

View File

@ -18,14 +18,15 @@ package e2e
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/test/e2e/framework"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/test/e2e/framework"
) )
var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() { var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {

View File

@ -21,7 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -32,17 +32,17 @@ import (
var _ = framework.KubeDescribe("Mesos", func() { var _ = framework.KubeDescribe("Mesos", func() {
f := framework.NewDefaultFramework("pods") f := framework.NewDefaultFramework("pods")
var c *client.Client var c clientset.Interface
var ns string var ns string
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("mesos/docker") framework.SkipUnlessProviderIs("mesos/docker")
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
It("applies slave attributes as labels", func() { It("applies slave attributes as labels", func() {
nodeClient := f.Client.Nodes() nodeClient := f.ClientSet.Core().Nodes()
rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
options := api.ListOptions{LabelSelector: rackA} options := api.ListOptions{LabelSelector: rackA}
@ -62,11 +62,10 @@ var _ = framework.KubeDescribe("Mesos", func() {
}) })
It("starts static pods on every node in the mesos cluster", func() { It("starts static pods on every node in the mesos cluster", func() {
client := f.Client client := f.ClientSet
framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready") framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
nodelist := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodelist := framework.GetReadySchedulableNodesOrDie(client)
const ns = "static-pods" const ns = "static-pods"
numpods := int32(len(nodelist.Items)) numpods := int32(len(nodelist.Items))
framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}), framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}),
@ -80,7 +79,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
// scheduled onto it. // scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.") By("Trying to launch a pod with a label to get a node which can launch it.")
podName := "with-label" podName := "with-label"
_, err := c.Pods(ns).Create(&api.Pod{ _, err := c.Core().Pods(ns).Create(&api.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
}, },
@ -94,7 +93,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
}, },
}, },
}, },
@ -102,10 +101,10 @@ var _ = framework.KubeDescribe("Mesos", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns)) framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns))
pod, err := c.Pods(ns).Get(podName) pod, err := c.Core().Pods(ns).Get(podName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeClient := f.Client.Nodes() nodeClient := f.ClientSet.Core().Nodes()
// schedule onto node with rack=2 being assigned to the "public" role // schedule onto node with rack=2 being assigned to the "public" role
rack2 := labels.SelectorFromSet(map[string]string{ rack2 := labels.SelectorFromSet(map[string]string{

View File

@ -20,7 +20,7 @@ import (
"strings" "strings"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -30,11 +30,11 @@ import (
var _ = framework.KubeDescribe("MetricsGrabber", func() { var _ = framework.KubeDescribe("MetricsGrabber", func() {
f := framework.NewDefaultFramework("metrics-grabber") f := framework.NewDefaultFramework("metrics-grabber")
var c *client.Client var c clientset.Interface
var grabber *metrics.MetricsGrabber var grabber *metrics.MetricsGrabber
BeforeEach(func() { BeforeEach(func() {
var err error var err error
c = f.Client c = f.ClientSet
framework.ExpectNoError(err) framework.ExpectNoError(err)
grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true) grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a Scheduler.", func() { It("should grab all metrics from a Scheduler.", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false
@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a ControllerManager.", func() { It("should grab all metrics from a ControllerManager.", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false

View File

@ -24,7 +24,7 @@ import (
influxdb "github.com/influxdata/influxdb/client" influxdb "github.com/influxdata/influxdb/client"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Monitoring", func() {
}) })
It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() { It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
testMonitoringUsingHeapsterInfluxdb(f.Client) testMonitoringUsingHeapsterInfluxdb(f.ClientSet)
}) })
}) })
@ -61,8 +61,8 @@ var (
) )
// Query sends a command to the server and returns the Response // Query sends a command to the server and returns the Response
func Query(c *client.Client, query string) (*influxdb.Response, error) { func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
result, err := c.Get(). result, err := c.Core().RESTClient().Get().
Prefix("proxy"). Prefix("proxy").
Namespace("kube-system"). Namespace("kube-system").
Resource("services"). Resource("services").
@ -89,7 +89,7 @@ func Query(c *client.Client, query string) (*influxdb.Response, error) {
return &response, nil return &response, nil
} }
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) { func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
expectedPods := []string{} expectedPods := []string{}
// Iterate over the labels that identify the replication controllers that we // Iterate over the labels that identify the replication controllers that we
// want to check. rcLabels contains the values for the k8s-app key // want to check. rcLabels contains the values for the k8s-app key
@ -102,11 +102,11 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
for _, rcLabel := range rcLabels { for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector() selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
deploymentList, err := c.Deployments(api.NamespaceSystem).List(options) deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(options) rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -122,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
for _, rc := range rcList.Items { for _, rc := range rcList.Items {
selector := labels.Set(rc.Spec.Selector).AsSelector() selector := labels.Set(rc.Spec.Selector).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
podList, err := c.Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -137,7 +137,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
for _, rc := range deploymentList.Items { for _, rc := range deploymentList.Items {
selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector() selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
podList, err := c.Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -152,7 +152,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
for _, ps := range psList.Items { for _, ps := range psList.Items {
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector() selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := api.ListOptions{LabelSelector: selector}
podList, err := c.Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -167,8 +167,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
return expectedPods, nil return expectedPods, nil
} }
func expectedServicesExist(c *client.Client) error { func expectedServicesExist(c clientset.Interface) error {
serviceList, err := c.Services(api.NamespaceSystem).List(api.ListOptions{}) serviceList, err := c.Core().Services(api.NamespaceSystem).List(api.ListOptions{})
if err != nil { if err != nil {
return err return err
} }
@ -185,9 +185,9 @@ func expectedServicesExist(c *client.Client) error {
return nil return nil
} }
func getAllNodesInCluster(c *client.Client) ([]string, error) { func getAllNodesInCluster(c clientset.Interface) ([]string, error) {
// It should be OK to list unschedulable Nodes here. // It should be OK to list unschedulable Nodes here.
nodeList, err := c.Nodes().List(api.ListOptions{}) nodeList, err := c.Core().Nodes().List(api.ListOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -198,7 +198,7 @@ func getAllNodesInCluster(c *client.Client) ([]string, error) {
return result, nil return result, nil
} }
func getInfluxdbData(c *client.Client, query string, tag string) (map[string]bool, error) { func getInfluxdbData(c clientset.Interface, query string, tag string) (map[string]bool, error) {
response, err := Query(c, query) response, err := Query(c, query)
if err != nil { if err != nil {
return nil, err return nil, err
@ -232,7 +232,7 @@ func expectedItemsExist(expectedItems []string, actualItems map[string]bool) boo
return true return true
} }
func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string) bool { func validatePodsAndNodes(c clientset.Interface, expectedPods, expectedNodes []string) bool {
pods, err := getInfluxdbData(c, podlistQuery, "pod_id") pods, err := getInfluxdbData(c, podlistQuery, "pod_id")
if err != nil { if err != nil {
// We don't fail the test here because the influxdb service might still not be running. // We don't fail the test here because the influxdb service might still not be running.
@ -255,7 +255,7 @@ func validatePodsAndNodes(c *client.Client, expectedPods, expectedNodes []string
return true return true
} }
func testMonitoringUsingHeapsterInfluxdb(c *client.Client) { func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) {
// Check if heapster pods and services are up. // Check if heapster pods and services are up.
expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c) expectedPods, err := verifyExpectedRcsExistAndGetExpectedPods(c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -279,10 +279,10 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
framework.Failf("monitoring using heapster and influxdb test failed") framework.Failf("monitoring using heapster and influxdb test failed")
} }
func printDebugInfo(c *client.Client) { func printDebugInfo(c clientset.Interface) {
set := labels.Set{"k8s-app": "heapster"} set := labels.Set{"k8s-app": "heapster"}
options := api.ListOptions{LabelSelector: set.AsSelector()} options := api.ListOptions{LabelSelector: set.AsSelector()}
podList, err := c.Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
framework.Logf("Error while listing pods %v", err) framework.Logf("Error while listing pods %v", err)
return return
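verifyExpectedRcsExistAndGetExpectedPods also shows that API-group membership is now explicit: Deployments hang off Extensions(), ReplicationControllers off Core(). A minimal sketch of listing both; listSystemWorkloads is a hypothetical helper:

package e2esketch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/labels"
)

// listSystemWorkloads lists kube-system workloads by label through their
// respective API-group clients.
func listSystemWorkloads(c clientset.Interface, selector labels.Selector) error {
	options := api.ListOptions{LabelSelector: selector}
	if _, err := c.Extensions().Deployments(api.NamespaceSystem).List(options); err != nil {
		return err
	}
	_, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
	return err
}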

View File

@ -51,7 +51,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
//Wait 10 seconds, then SEND delete requests for all the namespaces. //Wait 10 seconds, then SEND delete requests for all the namespaces.
By("Waiting 10 seconds") By("Waiting 10 seconds")
time.Sleep(time.Duration(10 * time.Second)) time.Sleep(time.Duration(10 * time.Second))
deleted, err := framework.DeleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */) deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(len(deleted)).To(Equal(totalNS)) Expect(len(deleted)).To(Equal(totalNS))
@ -60,7 +60,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
var cnt = 0 var cnt = 0
nsList, err := f.Client.Namespaces().List(api.ListOptions{}) nsList, err := f.ClientSet.Core().Namespaces().List(api.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -85,7 +85,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace") By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a pod in the namespace") By("Creating a pod in the namespace")
@ -97,26 +97,26 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "nginx", Name: "nginx",
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
}, },
}, },
}, },
} }
pod, err = f.Client.Pods(namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for the pod to have running status") By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
By("Deleting the namespace") By("Deleting the namespace")
err = f.Client.Namespaces().Delete(namespace.Name) err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.") By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.Client.Namespaces().Get(namespace.Name) _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name)
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
return true, nil return true, nil
} }
@ -124,7 +124,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
})) }))
By("Verifying there is no pod in the namespace") By("Verifying there is no pod in the namespace")
_, err = f.Client.Pods(namespace.Name).Get(pod.Name) _, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
} }
@ -136,7 +136,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for a default service account to be provisioned in namespace") By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name) err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a service in the namespace") By("Creating a service in the namespace")
@ -157,18 +157,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}}, }},
}, },
} }
service, err = f.Client.Services(namespace.Name).Create(service) service, err = f.ClientSet.Core().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the namespace") By("Deleting the namespace")
err = f.Client.Namespaces().Delete(namespace.Name) err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.") By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
_, err = f.Client.Namespaces().Get(namespace.Name) _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name)
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
return true, nil return true, nil
} }
@ -176,7 +176,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
})) }))
By("Verifying there is no service in the namespace") By("Verifying there is no service in the namespace")
_, err = f.Client.Services(namespace.Name).Get(service.Name) _, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
} }
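Besides the Core() hop, Delete now takes an explicit options argument (nil here for the defaults). A minimal sketch of the delete-then-poll pattern used above, reusing the errors and wait packages the test already imports; deleteNamespaceAndWait is hypothetical:

package e2esketch

import (
	"time"

	"k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
)

// deleteNamespaceAndWait deletes a namespace with the two-argument Delete
// and polls until the apiserver reports it NotFound.
func deleteNamespaceAndWait(c clientset.Interface, name string, timeout time.Duration) error {
	if err := c.Core().Namespaces().Delete(name, nil); err != nil {
		return err
	}
	return wait.Poll(1*time.Second, timeout, func() (bool, error) {
		if _, err := c.Core().Namespaces().Get(name); errors.IsNotFound(err) {
			return true, nil
		}
		return false, nil
	})
}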

View File

@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("Networking", func() {
} }
for _, test := range tests { for _, test := range tests {
By(fmt.Sprintf("testing: %s", test.path)) By(fmt.Sprintf("testing: %s", test.path))
data, err := f.Client.RESTClient.Get(). data, err := f.ClientSet.Core().RESTClient().Get().
AbsPath(test.path). AbsPath(test.path).
DoRaw() DoRaw()
if err != nil { if err != nil {

View File

@ -22,7 +22,8 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
@ -41,11 +42,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
image = "gcr.io/google_containers/node-problem-detector:v0.1" image = "gcr.io/google_containers/node-problem-detector:v0.1"
) )
f := framework.NewDefaultFramework("node-problem-detector") f := framework.NewDefaultFramework("node-problem-detector")
var c *client.Client var c clientset.Interface
var uid string var uid string
var ns, name, configName, eventNamespace string var ns, name, configName, eventNamespace string
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
uid = string(uuid.NewUUID()) uid = string(uuid.NewUUID())
name = "node-problem-detector-" + uid name = "node-problem-detector-" + uid
@ -116,7 +117,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
] ]
}` }`
By("Get a non master node to run the pod") By("Get a non master node to run the pod")
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node = nil node = nil
for _, n := range nodes.Items { for _, n := range nodes.Items {
@ -139,7 +140,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile) cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed()) Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
By("Create config map for the node problem detector") By("Create config map for the node problem detector")
_, err = c.ConfigMaps(ns).Create(&api.ConfigMap{ _, err = c.Core().ConfigMaps(ns).Create(&api.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: configName, Name: configName,
}, },
@ -147,7 +148,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Create the node problem detector") By("Create the node problem detector")
_, err = c.Pods(ns).Create(&api.Pod{ _, err = c.Core().Pods(ns).Create(&api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
}, },
@ -197,11 +198,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
It("should generate node condition and events for corresponding errors", func() { It("should generate node condition and events for corresponding errors", func() {
By("Make sure no events are generated") By("Make sure no events are generated")
Consistently(func() error { Consistently(func() error {
return verifyNoEvents(c.Events(eventNamespace), eventListOptions) return verifyNoEvents(c.Core().Events(eventNamespace), eventListOptions)
}, pollConsistent, pollInterval).Should(Succeed()) }, pollConsistent, pollInterval).Should(Succeed())
By("Make sure the default node condition is generated") By("Make sure the default node condition is generated")
Eventually(func() error { Eventually(func() error {
return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage) return verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)
}, pollTimeout, pollInterval).Should(Succeed()) }, pollTimeout, pollInterval).Should(Succeed())
num := 3 num := 3
@ -209,39 +210,39 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
Expect(framework.IssueSSHCommand(injectCommand(tempMessage, num), framework.TestContext.Provider, node)).To(Succeed()) Expect(framework.IssueSSHCommand(injectCommand(tempMessage, num), framework.TestContext.Provider, node)).To(Succeed())
By(fmt.Sprintf("Wait for %d events generated", num)) By(fmt.Sprintf("Wait for %d events generated", num))
Eventually(func() error { Eventually(func() error {
return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
}, pollTimeout, pollInterval).Should(Succeed()) }, pollTimeout, pollInterval).Should(Succeed())
By(fmt.Sprintf("Make sure only %d events generated", num)) By(fmt.Sprintf("Make sure only %d events generated", num))
Consistently(func() error { Consistently(func() error {
return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
}, pollConsistent, pollInterval).Should(Succeed()) }, pollConsistent, pollInterval).Should(Succeed())
By("Make sure the node condition is still false") By("Make sure the node condition is still false")
Expect(verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)).To(Succeed()) Expect(verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)).To(Succeed())
By("Inject 1 permanent error") By("Inject 1 permanent error")
Expect(framework.IssueSSHCommand(injectCommand(permMessage, 1), framework.TestContext.Provider, node)).To(Succeed()) Expect(framework.IssueSSHCommand(injectCommand(permMessage, 1), framework.TestContext.Provider, node)).To(Succeed())
By("Make sure the corresponding node condition is generated") By("Make sure the corresponding node condition is generated")
Eventually(func() error { Eventually(func() error {
return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage) return verifyCondition(c.Core().Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage)
}, pollTimeout, pollInterval).Should(Succeed()) }, pollTimeout, pollInterval).Should(Succeed())
By("Make sure no new events are generated") By("Make sure no new events are generated")
Consistently(func() error { Consistently(func() error {
return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage) return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
}, pollConsistent, pollInterval).Should(Succeed()) }, pollConsistent, pollInterval).Should(Succeed())
}) })
AfterEach(func() { AfterEach(func() {
By("Delete the node problem detector") By("Delete the node problem detector")
c.Pods(ns).Delete(name, api.NewDeleteOptions(0)) c.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear") By("Wait for the node problem detector to disappear")
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
By("Delete the config map") By("Delete the config map")
c.ConfigMaps(ns).Delete(configName) c.Core().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events") By("Clean up the events")
Expect(c.Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed()) Expect(c.Core().Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed())
By("Clean up the node condition") By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do() c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
By("Clean up the temporary directory") By("Clean up the temporary directory")
framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node) framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node)
}) })
@ -249,7 +250,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
}) })
// verifyEvents verifies there are num specific events generated // verifyEvents verifies there are num specific events generated
func verifyEvents(e client.EventInterface, options api.ListOptions, num int, reason, message string) error { func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num int, reason, message string) error {
events, err := e.List(options) events, err := e.List(options)
if err != nil { if err != nil {
return err return err
@ -268,7 +269,7 @@ func verifyEvents(e client.EventInterface, options api.ListOptions, num int, rea
} }
// verifyNoEvents verifies there is no event generated // verifyNoEvents verifies there is no event generated
func verifyNoEvents(e client.EventInterface, options api.ListOptions) error { func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) error {
events, err := e.List(options) events, err := e.List(options)
if err != nil { if err != nil {
return err return err
@ -280,7 +281,7 @@ func verifyNoEvents(e client.EventInterface, options api.ListOptions) error {
} }
// verifyCondition verifies that the specific node condition is generated; if reason and message are empty, they are not checked // verifyCondition verifies that the specific node condition is generated; if reason and message are empty, they are not checked
func verifyCondition(n client.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error { func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error {
node, err := n.Get(nodeName) node, err := n.Get(nodeName)
if err != nil { if err != nil {
return err return err
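
The verify* helpers above switch their parameters from the old unversioned interfaces to the generated typed ones, so callers simply hand in c.Core().Events(ns) or c.Core().Nodes(). A sketch of a helper written against the new types; countEvents is hypothetical, the import path is the one this commit introduces:

    // Assumes coreclientset = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
    // and api = "k8s.io/kubernetes/pkg/api".
    func countEvents(e coreclientset.EventInterface, options api.ListOptions) (int, error) {
        events, err := e.List(options) // the List call itself is unchanged; only the parameter type moved
        if err != nil {
            return 0, err
        }
        return len(events.Items), nil
    }
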


@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -67,16 +66,14 @@ const (
// //
// Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way. // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() { var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
var c *client.Client var c clientset.Interface
var cs clientset.Interface
var unfilledNodeName, recoveredNodeName string var unfilledNodeName, recoveredNodeName string
f := framework.NewDefaultFramework("node-outofdisk") f := framework.NewDefaultFramework("node-outofdisk")
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
cs = f.ClientSet
nodelist := framework.GetReadySchedulableNodesOrDie(cs) nodelist := framework.GetReadySchedulableNodesOrDie(c)
// Skip this test on small clusters. No need to fail since it is not a use // Skip this test on small clusters. No need to fail since it is not a use
// case that any cluster of small size needs to support. // case that any cluster of small size needs to support.
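
Where a test previously kept both a *client.Client and a clientset.Interface, it now keeps a single clientset.Interface assigned from f.ClientSet, and every helper takes that one type. A sketch of a helper in the new style; podNames is hypothetical:

    // Assumes clientset = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset".
    func podNames(c clientset.Interface, ns string) ([]string, error) {
        pods, err := c.Core().Pods(ns).List(api.ListOptions{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(pods.Items))
        for _, pod := range pods.Items {
            names = append(names, pod.Name)
        }
        return names, nil
    }
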
@ -90,7 +87,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
AfterEach(func() { AfterEach(func() {
nodelist := framework.GetReadySchedulableNodesOrDie(cs) nodelist := framework.GetReadySchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).ToNot(BeZero()) Expect(len(nodelist.Items)).ToNot(BeZero())
for _, node := range nodelist.Items { for _, node := range nodelist.Items {
if unfilledNodeName == node.Name || recoveredNodeName == node.Name { if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@ -101,7 +98,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
}) })
It("runs out of disk space", func() { It("runs out of disk space", func() {
unfilledNode, err := c.Nodes().Get(unfilledNodeName) unfilledNode, err := c.Core().Nodes().Get(unfilledNodeName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name)) By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name))
@ -116,7 +113,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99) podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99)
ns := f.Namespace.Name ns := f.Namespace.Name
podClient := c.Pods(ns) podClient := c.Core().Pods(ns)
By("Creating pods and waiting for all but one pods to be scheduled") By("Creating pods and waiting for all but one pods to be scheduled")
@ -143,7 +140,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
"reason": "FailedScheduling", "reason": "FailedScheduling",
}.AsSelector() }.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options) schedEvents, err := c.Core().Events(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(schedEvents.Items) > 0 { if len(schedEvents.Items) > 0 {
@ -153,7 +150,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
} }
}) })
nodelist := framework.GetReadySchedulableNodesOrDie(cs) nodelist := framework.GetReadySchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1)) Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
nodeToRecover := nodelist.Items[1] nodeToRecover := nodelist.Items[1]
@ -171,8 +168,8 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
}) })
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU. // createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) { func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
podClient := c.Pods(ns) podClient := c.Core().Pods(ns)
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -200,8 +197,8 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by // availCpu calculates the available CPU on a given node by subtracting the CPU requested by
// all the pods from the total available CPU capacity on the node. // all the pods from the total available CPU capacity on the node.
func availCpu(c *client.Client, node *api.Node) (int64, error) { func availCpu(c clientset.Interface, node *api.Node) (int64, error) {
podClient := c.Pods(api.NamespaceAll) podClient := c.Core().Pods(api.NamespaceAll)
selector := fields.Set{"spec.nodeName": node.Name}.AsSelector() selector := fields.Set{"spec.nodeName": node.Name}.AsSelector()
options := api.ListOptions{FieldSelector: selector} options := api.ListOptions{FieldSelector: selector}
@ -220,10 +217,10 @@ func availCpu(c *client.Client, node *api.Node) (int64, error) {
// availSize returns the available disk space on a given node by querying node stats which // availSize returns the available disk space on a given node by querying node stats which
// is in turn obtained internally from cadvisor. // is in turn obtained internally from cadvisor.
func availSize(c *client.Client, node *api.Node) (uint64, error) { func availSize(c clientset.Interface, node *api.Node) (uint64, error) {
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource) framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource)
res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() res, err := c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
if err != nil { if err != nil {
return 0, fmt.Errorf("error querying cAdvisor API: %v", err) return 0, fmt.Errorf("error querying cAdvisor API: %v", err)
} }
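
Raw HTTP access, as in availSize above, also moves behind c.Core().RESTClient(). A sketch of the same stats query; nodeStatsRaw is hypothetical and the path mirrors the hunk above:

    // Assumes "fmt" and "time" are imported.
    func nodeStatsRaw(c clientset.Interface, nodeName string) ([]byte, error) {
        statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", nodeName)
        return c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
    }
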
@ -238,7 +235,7 @@ func availSize(c *client.Client, node *api.Node) (uint64, error) {
// fillDiskSpace fills the available disk space on a given node by creating a large file. The disk // fillDiskSpace fills the available disk space on a given node by creating a large file. The disk
// space on the node is filled in such a way that the available space after filling the disk is just // space on the node is filled in such a way that the available space after filling the disk is just
// below the lowDiskSpaceThreshold mark. // below the lowDiskSpaceThreshold mark.
func fillDiskSpace(c *client.Client, node *api.Node) { func fillDiskSpace(c clientset.Interface, node *api.Node) {
avail, err := availSize(c, node) avail, err := availSize(c, node)
framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
@ -259,7 +256,7 @@ func fillDiskSpace(c *client.Client, node *api.Node) {
} }
// recoverDiskSpace recovers disk space, filled by creating a large file, on a given node. // recoverDiskSpace recovers disk space, filled by creating a large file, on a given node.
func recoverDiskSpace(c *client.Client, node *api.Node) { func recoverDiskSpace(c clientset.Interface, node *api.Node) {
By(fmt.Sprintf("Recovering disk space on node %s", node.Name)) By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
cmd := "rm -f test.img" cmd := "rm -f test.img"
framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))


@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@ -53,8 +53,8 @@ const (
var _ = framework.KubeDescribe("Pod Disks", func() { var _ = framework.KubeDescribe("Pod Disks", func() {
var ( var (
podClient client.PodInterface podClient unversionedcore.PodInterface
nodeClient client.NodeInterface nodeClient unversionedcore.NodeInterface
host0Name types.NodeName host0Name types.NodeName
host1Name types.NodeName host1Name types.NodeName
) )
@ -63,8 +63,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessNodeCountIsAtLeast(2) framework.SkipUnlessNodeCountIsAtLeast(2)
podClient = f.Client.Pods(f.Namespace.Name) podClient = f.ClientSet.Core().Pods(f.Namespace.Name)
nodeClient = f.Client.Nodes() nodeClient = f.ClientSet.Core().Nodes()
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
@ -702,7 +702,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
} }
func waitForPDInVolumesInUse( func waitForPDInVolumesInUse(
nodeClient client.NodeInterface, nodeClient unversionedcore.NodeInterface,
diskName string, diskName string,
nodeName types.NodeName, nodeName types.NodeName,
timeout time.Duration, timeout time.Duration,


@ -27,16 +27,16 @@ import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
// Delete the nfs-server pod. // Delete the nfs-server pod.
func nfsServerPodCleanup(c *client.Client, config VolumeTestConfig) { func nfsServerPodCleanup(c clientset.Interface, config VolumeTestConfig) {
defer GinkgoRecover() defer GinkgoRecover()
podClient := c.Pods(config.namespace) podClient := c.Core().Pods(config.namespace)
if config.serverImage != "" { if config.serverImage != "" {
podName := config.prefix + "-server" podName := config.prefix + "-server"
@ -49,14 +49,14 @@ func nfsServerPodCleanup(c *client.Client, config VolumeTestConfig) {
// Delete the PV. Fail the test if the delete fails. If successful, the returned PV should // Delete the PV. Fail the test if the delete fails. If successful, the returned PV should
// be nil, which prevents the AfterEach from attempting to delete it. // be nil, which prevents the AfterEach from attempting to delete it.
func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) { func deletePersistentVolume(c clientset.Interface, pv *api.PersistentVolume) (*api.PersistentVolume, error) {
if pv == nil { if pv == nil {
return nil, fmt.Errorf("PV to be deleted is nil") return nil, fmt.Errorf("PV to be deleted is nil")
} }
framework.Logf("Deleting PersistentVolume %v", pv.Name) framework.Logf("Deleting PersistentVolume %v", pv.Name)
err := c.PersistentVolumes().Delete(pv.Name) err := c.Core().PersistentVolumes().Delete(pv.Name, nil)
if err != nil { if err != nil {
return pv, fmt.Errorf("Delete() PersistentVolume %v failed: %v", pv.Name, err) return pv, fmt.Errorf("Delete() PersistentVolume %v failed: %v", pv.Name, err)
} }
@ -77,16 +77,16 @@ func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) (*api.Pe
// delete is successful the returned pvc should be nil and the pv non-nil. // delete is successful the returned pvc should be nil and the pv non-nil.
// Note: the pv and pvc are returned back to the It() caller so that the // Note: the pv and pvc are returned back to the It() caller so that the
// AfterEach func can delete these objects if they are not nil. // AfterEach func can delete these objects if they are not nil.
func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name) framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name)
err := c.PersistentVolumeClaims(ns).Delete(pvc.Name) err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil)
if err != nil { if err != nil {
return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err) return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err)
} }
// Check that the PVC is really deleted. // Check that the PVC is really deleted.
pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name) pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
if err == nil { if err == nil {
return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name) return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name)
} }
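
The other change threaded through this file is the Delete signature: the generated clientset takes explicit *api.DeleteOptions, so the old one-argument calls gain a trailing nil (default options). A sketch of the delete-then-confirm pattern used above; deletePVCAndConfirm is hypothetical:

    // Assumes "fmt" is imported.
    func deletePVCAndConfirm(c clientset.Interface, ns, name string) error {
        // nil means default delete options; api.NewDeleteOptions(0) would request immediate deletion
        if err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil); err != nil {
            return err
        }
        // the test above expects the follow-up Get to fail once the claim is gone
        if _, err := c.Core().PersistentVolumeClaims(ns).Get(name); err == nil {
            return fmt.Errorf("PVC %v still exists after delete", name)
        }
        return nil
    }
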
@ -102,7 +102,7 @@ func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolu
} }
// Examine the pv.ClaimRef and UID. Expect nil values. // Examine the pv.ClaimRef and UID. Expect nil values.
pv, err = c.PersistentVolumes().Get(pv.Name) pv, err = c.Core().PersistentVolumes().Get(pv.Name)
if err != nil { if err != nil {
return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name) return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
} }
@ -115,9 +115,9 @@ func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolu
} }
// create the PV resource. Fails test on error. // create the PV resource. Fails test on error.
func createPV(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) { func createPV(c clientset.Interface, pv *api.PersistentVolume) (*api.PersistentVolume, error) {
pv, err := c.PersistentVolumes().Create(pv) pv, err := c.Core().PersistentVolumes().Create(pv)
if err != nil { if err != nil {
return pv, fmt.Errorf("Create PersistentVolume %v failed: %v", pv.Name, err) return pv, fmt.Errorf("Create PersistentVolume %v failed: %v", pv.Name, err)
} }
@ -126,9 +126,9 @@ func createPV(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume
} }
// create the PVC resource. Fails test on error. // create the PVC resource. Fails test on error.
func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
pvc, err := c.PersistentVolumeClaims(ns).Create(pvc) pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
if err != nil { if err != nil {
return pvc, fmt.Errorf("Create PersistentVolumeClaim %v failed: %v", pvc.Name, err) return pvc, fmt.Errorf("Create PersistentVolumeClaim %v failed: %v", pvc.Name, err)
} }
@ -144,7 +144,7 @@ func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*ap
// Note: in the pre-bind case the real PVC name, which is generated, is not // Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created // known until after the PVC is instantiated. This is why the pvc is created
// before the pv. // before the pv.
func createPVCPV(c *client.Client, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
var bindTo *api.PersistentVolumeClaim var bindTo *api.PersistentVolumeClaim
var preBindMsg string var preBindMsg string
@ -187,7 +187,7 @@ func createPVCPV(c *client.Client, serverIP, ns string, preBind bool) (*api.Pers
// Note: in the pre-bind case the real PV name, which is generated, is not // Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created // known until after the PV is instantiated. This is why the pv is created
// before the pvc. // before the pvc.
func createPVPVC(c *client.Client, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
preBindMsg := "" preBindMsg := ""
if preBind { if preBind {
@ -219,7 +219,7 @@ func createPVPVC(c *client.Client, serverIP, ns string, preBind bool) (*api.Pers
} }
// Wait for the pv and pvc to bind to each other. Fail test on errors. // Wait for the pv and pvc to bind to each other. Fail test on errors.
func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error { func waitOnPVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV // Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
@ -243,7 +243,7 @@ func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *
// reflect that these resources have been retrieved again (Get). // reflect that these resources have been retrieved again (Get).
// Note: the pv and pvc are returned back to the It() caller so that the // Note: the pv and pvc are returned back to the It() caller so that the
// AfterEach func can delete these objects if they are not nil. // AfterEach func can delete these objects if they are not nil.
func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { func waitAndValidatePVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
var err error var err error
@ -254,12 +254,12 @@ func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolu
// Check that the PersistentVolume.ClaimRef is valid and matches the PVC // Check that the PersistentVolume.ClaimRef is valid and matches the PVC
framework.Logf("Checking PersistentVolume ClaimRef is non-nil") framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
pv, err = c.PersistentVolumes().Get(pv.Name) pv, err = c.Core().PersistentVolumes().Get(pv.Name)
if err != nil { if err != nil {
return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name) return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
} }
pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name) pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
if err != nil { if err != nil {
return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name) return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name)
} }
@ -273,7 +273,7 @@ func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolu
} }
// Test that the pod's exit code is zero. // Test that the pod's exit code is zero.
func testPodSuccessOrFail(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error { func testPodSuccessOrFail(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error {
By("Pod should terminate with exitcode 0 (success)") By("Pod should terminate with exitcode 0 (success)")
@ -287,10 +287,10 @@ func testPodSuccessOrFail(f *framework.Framework, c *client.Client, ns string, p
} }
// Delete the passed in pod. // Delete the passed in pod.
func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error { func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error {
framework.Logf("Deleting pod %v", pod.Name) framework.Logf("Deleting pod %v", pod.Name)
err := c.Pods(ns).Delete(pod.Name, nil) err := c.Core().Pods(ns).Delete(pod.Name, nil)
if err != nil { if err != nil {
return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err) return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err)
} }
@ -303,7 +303,7 @@ func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod
// Re-get the pod to double check that it has been deleted; expect err // Re-get the pod to double check that it has been deleted; expect err
// Note: Get() writes a log error if the pod is not found // Note: Get() writes a log error if the pod is not found
_, err = c.Pods(ns).Get(pod.Name) _, err = c.Core().Pods(ns).Get(pod.Name)
if err == nil { if err == nil {
return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name) return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name)
} }
@ -316,7 +316,7 @@ func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod
} }
// Create the test pod, wait for (hopefully) success, and then delete the pod. // Create the test pod, wait for (hopefully) success, and then delete the pod.
func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string, claimName string) error { func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns string, claimName string) error {
var errmsg string var errmsg string
@ -326,7 +326,7 @@ func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string,
pod := makeWritePod(ns, claimName) pod := makeWritePod(ns, claimName)
// Instantiate pod (Create) // Instantiate pod (Create)
runPod, err := c.Pods(ns).Create(pod) runPod, err := c.Core().Pods(ns).Create(pod)
if err != nil || runPod == nil { if err != nil || runPod == nil {
name := "" name := ""
if runPod != nil { if runPod != nil {
@ -366,7 +366,7 @@ func createWaitAndDeletePod(f *framework.Framework, c *client.Client, ns string,
// these resources have been retrieved again (Get). // these resources have been retrieved again (Get).
// Note: the pv and pvc are returned back to the It() caller so that the // Note: the pv and pvc are returned back to the It() caller so that the
// AfterEach func can delete these objects if they are not nil. // AfterEach func can delete these objects if they are not nil.
func completeTest(f *framework.Framework, c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) { func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
// 1. verify that the PV and PVC are bound correctly // 1. verify that the PV and PVC are bound correctly
By("Validating the PV-PVC binding") By("Validating the PV-PVC binding")
@ -402,7 +402,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
// global vars for the It() tests below // global vars for the It() tests below
f := framework.NewDefaultFramework("pv") f := framework.NewDefaultFramework("pv")
var c *client.Client var c clientset.Interface
var ns string var ns string
var NFSconfig VolumeTestConfig var NFSconfig VolumeTestConfig
var serverIP string var serverIP string
@ -421,7 +421,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
} }
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
// If it doesn't exist, create the nfs server pod in "default" ns // If it doesn't exist, create the nfs server pod in "default" ns
@ -439,7 +439,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
if pvc != nil && len(pvc.Name) > 0 { if pvc != nil && len(pvc.Name) > 0 {
// Delete the PersistentVolumeClaim // Delete the PersistentVolumeClaim
framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name) framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name)
err := c.PersistentVolumeClaims(ns).Delete(pvc.Name) err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err) framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err)
} }
@ -447,7 +447,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
} }
if pv != nil && len(pv.Name) > 0 { if pv != nil && len(pv.Name) > 0 {
framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name) framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name)
err := c.PersistentVolumes().Delete(pv.Name) err := c.Core().PersistentVolumes().Delete(pv.Name, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err) framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err)
} }
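
The AfterEach cleanup keeps its shape: only the accessor (Core()) and the two-argument Delete change, and IsNotFound is still tolerated so repeated cleanup stays quiet. A sketch, assuming the usual apierrs alias for "k8s.io/kubernetes/pkg/api/errors"; cleanupPV is hypothetical:

    func cleanupPV(c clientset.Interface, pvName string) {
        err := c.Core().PersistentVolumes().Delete(pvName, nil)
        if err != nil && !apierrs.IsNotFound(err) {
            framework.Logf("cleanup: delete of PersistentVolume %v failed: %v", pvName, err)
        }
    }
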


@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/apps"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/petset" "k8s.io/kubernetes/pkg/controller/petset"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
} }
f := framework.NewFramework("petset", options, nil) f := framework.NewFramework("petset", options, nil)
var ns string var ns string
var c *client.Client var c clientset.Interface
BeforeEach(func() { BeforeEach(func() {
// PetSet is in alpha, so it's disabled on some platforms. We skip this // PetSet is in alpha, so it's disabled on some platforms. We skip this
@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "petsets"}, f.Namespace.Name) framework.SkipIfMissingResource(f.ClientPool, unversioned.GroupVersionResource{Group: apps.GroupName, Version: "v1alpha1", Resource: "petsets"}, f.Namespace.Name)
} }
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
BeforeEach(func() { BeforeEach(func() {
By("creating service " + headlessSvcName + " in namespace " + ns) By("creating service " + headlessSvcName + " in namespace " + ns)
headlessService := createServiceSpec(headlessSvcName, "", true, labels) headlessService := createServiceSpec(headlessSvcName, "", true, labels)
_, err := c.Services(ns).Create(headlessService) _, err := c.Core().Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("PetSet [Slow] [Feature:PetSet]", func() {
var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func() { var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func() {
f := framework.NewDefaultFramework("pet-set-recreate") f := framework.NewDefaultFramework("pet-set-recreate")
var c *client.Client var c clientset.Interface
var ns string var ns string
labels := map[string]string{ labels := map[string]string{
@ -270,9 +270,9 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
framework.SkipUnlessProviderIs("gce", "vagrant") framework.SkipUnlessProviderIs("gce", "vagrant")
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
headlessService := createServiceSpec(headlessSvcName, "", true, labels) headlessService := createServiceSpec(headlessSvcName, "", true, labels)
_, err := f.Client.Services(f.Namespace.Name).Create(headlessService) _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
framework.ExpectNoError(err) framework.ExpectNoError(err)
c = f.Client c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
@ -306,7 +306,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
NodeName: node.Name, NodeName: node.Name,
}, },
} }
pod, err := f.Client.Pods(f.Namespace.Name).Create(pod) pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("creating petset with conflicting port in namespace " + f.Namespace.Name) By("creating petset with conflicting port in namespace " + f.Namespace.Name)
@ -314,7 +314,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
petContainer := &ps.Spec.Template.Spec.Containers[0] petContainer := &ps.Spec.Template.Spec.Containers[0]
petContainer.Ports = append(petContainer.Ports, conflictingPort) petContainer.Ports = append(petContainer.Ports, conflictingPort)
ps.Spec.Template.Spec.NodeName = node.Name ps.Spec.Template.Spec.NodeName = node.Name
_, err = f.Client.Apps().PetSets(f.Namespace.Name).Create(ps) _, err = f.ClientSet.Apps().PetSets(f.Namespace.Name).Create(ps)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) By("waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
@ -324,7 +324,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
var initialPetPodUID types.UID var initialPetPodUID types.UID
By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.Client.Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName})) w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName}))
framework.ExpectNoError(err) framework.ExpectNoError(err)
// we need to get the UID from the pod in any state and wait until the pet set controller removes the pod at least once // we need to get the UID from the pod in any state and wait until the pet set controller removes the pod at least once
_, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) { _, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) {
@ -347,13 +347,13 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
} }
By("removing pod with conflicting port in namespace " + f.Namespace.Name) By("removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch a delete event, which is why we wait for the running phase like this rather than with watch.Until // we may catch a delete event, which is why we wait for the running phase like this rather than with watch.Until
Eventually(func() error { Eventually(func() error {
petPod, err := f.Client.Pods(f.Namespace.Name).Get(petPodName) petPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(petPodName)
if err != nil { if err != nil {
return err return err
} }
@ -367,8 +367,8 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(
}) })
}) })
func dumpDebugInfo(c *client.Client, ns string) { func dumpDebugInfo(c clientset.Interface, ns string) {
pl, _ := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pl, _ := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
for _, p := range pl.Items { for _, p := range pl.Items {
desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns)) desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns))
framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc) framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc)
@ -526,7 +526,7 @@ func petSetFromManifest(fileName, ns string) *apps.PetSet {
// petSetTester has all methods required to test a single petset. // petSetTester has all methods required to test a single petset.
type petSetTester struct { type petSetTester struct {
c *client.Client c clientset.Interface
} }
func (p *petSetTester) createPetSet(manifestPath, ns string) *apps.PetSet { func (p *petSetTester) createPetSet(manifestPath, ns string) *apps.PetSet {
@ -588,7 +588,7 @@ func (p *petSetTester) deletePetAtIndex(index int, ps *apps.PetSet) {
// pull the name out from an identity mapper. // pull the name out from an identity mapper.
name := fmt.Sprintf("%v-%v", ps.Name, index) name := fmt.Sprintf("%v-%v", ps.Name, index)
noGrace := int64(0) noGrace := int64(0)
if err := p.c.Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { if err := p.c.Core().Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
framework.Failf("Failed to delete pet %v for PetSet %v: %v", name, ps.Name, ps.Namespace, err) framework.Failf("Failed to delete pet %v for PetSet %v: %v", name, ps.Name, ps.Namespace, err)
} }
} }
@ -646,7 +646,7 @@ func (p *petSetTester) update(ns, name string, update func(ps *apps.PetSet)) {
func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList { func (p *petSetTester) getPodList(ps *apps.PetSet) *api.PodList {
selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
ExpectNoError(err) ExpectNoError(err)
podList, err := p.c.Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector}) podList, err := p.c.Core().Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector})
ExpectNoError(err) ExpectNoError(err)
return podList return podList
} }
@ -735,7 +735,7 @@ func (p *petSetTester) waitForStatus(ps *apps.PetSet, expectedReplicas int32) {
} }
} }
func deleteAllPetSets(c *client.Client, ns string) { func deleteAllPetSets(c clientset.Interface, ns string) {
pst := &petSetTester{c: c} pst := &petSetTester{c: c}
psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
ExpectNoError(err) ExpectNoError(err)
@ -759,7 +759,7 @@ func deleteAllPetSets(c *client.Client, ns string) {
pvNames := sets.NewString() pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a petset // TODO: Don't assume all pvcs in the ns belong to a petset
pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil { if err != nil {
framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil return false, nil
@ -768,7 +768,7 @@ func deleteAllPetSets(c *client.Client, ns string) {
pvNames.Insert(pvc.Spec.VolumeName) pvNames.Insert(pvc.Spec.VolumeName)
// TODO: Double check that there are no pods referencing the pvc // TODO: Double check that there are no pods referencing the pvc
framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil { if err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
return false, nil return false, nil
} }
} }
@ -779,7 +779,7 @@ func deleteAllPetSets(c *client.Client, ns string) {
} }
pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) { pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()}) pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil { if err != nil {
framework.Logf("WARNING: Failed to list pvs, retrying %v", err) framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil return false, nil
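
Watches follow the same pattern: f.ClientSet.Core().Pods(ns).Watch(api.SingleObject(...)) plugs into watch.Until exactly as before. A sketch that waits for a named pod to be deleted; waitForPodDeletion is hypothetical:

    // Assumes watch = "k8s.io/kubernetes/pkg/watch" and "time" are imported.
    func waitForPodDeletion(c clientset.Interface, ns, name string, timeout time.Duration) error {
        w, err := c.Core().Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
        if err != nil {
            return err
        }
        _, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
            return event.Type == watch.Deleted, nil
        })
        return err
    }
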


@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
pod, err := createTerminatingPod(f) pod, err := createTerminatingPod(f)
pod.ResourceVersion = "" pod.ResourceVersion = ""
pod.Status.Phase = api.PodFailed pod.Status.Phase = api.PodFailed
pod, err = f.Client.Pods(f.Namespace.Name).UpdateStatus(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil { if err != nil {
framework.Failf("err failing pod: %v", err) framework.Failf("err failing pod: %v", err)
} }
@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.Client.Pods(f.Namespace.Name).List(api.ListOptions{}) pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to list pod %v", err) framework.Logf("Failed to list pod %v", err)
return false, nil return false, nil
@ -96,5 +96,5 @@ func createTerminatingPod(f *framework.Framework) (*api.Pod, error) {
}, },
}, },
} }
return f.Client.Pods(f.Namespace.Name).Create(pod) return f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
} }
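
Status writes go through the same accessor; the garbage-collector test's UpdateStatus call above becomes a Core() call. A sketch of forcing a pod into the failed phase; markPodFailed is hypothetical:

    func markPodFailed(c clientset.Interface, ns string, pod *api.Pod) (*api.Pod, error) {
        pod.ResourceVersion = "" // cleared before the status update, as the test above does
        pod.Status.Phase = api.PodFailed
        return c.Core().Pods(ns).UpdateStatus(pod)
    }
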


@ -177,14 +177,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("abc", "1", "1", "1") pod := pfPod("abc", "1", "1", "1")
if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
defer func() { defer func() {
logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Logf("Error getting pod log: %v", err) framework.Logf("Error getting pod log: %v", err)
} else { } else {
@ -211,7 +211,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
} }
By("Verifying logs") By("Verifying logs")
logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Failf("Error retrieving pod logs: %v", err) framework.Failf("Error retrieving pod logs: %v", err)
} }
@ -222,14 +222,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
It("should support a client that connects, sends data, and disconnects [Conformance]", func() { It("should support a client that connects, sends data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("abc", "10", "10", "100") pod := pfPod("abc", "10", "10", "100")
if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
defer func() { defer func() {
logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Logf("Error getting pod log: %v", err) framework.Logf("Error getting pod log: %v", err)
} else { } else {
@ -277,7 +277,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
} }
By("Verifying logs") By("Verifying logs")
logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Failf("Error retrieving pod logs: %v", err) framework.Failf("Error retrieving pod logs: %v", err)
} }
@ -290,14 +290,14 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
It("should support a client that connects, sends no data, and disconnects [Conformance]", func() { It("should support a client that connects, sends no data, and disconnects [Conformance]", func() {
By("creating the target pod") By("creating the target pod")
pod := pfPod("", "10", "10", "100") pod := pfPod("", "10", "10", "100")
if _, err := f.Client.Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodRunning(pod.Name); err != nil { if err := f.WaitForPodRunning(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
defer func() { defer func() {
logs, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Logf("Error getting pod log: %v", err) framework.Logf("Error getting pod log: %v", err)
} else { } else {
@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Port forwarding", func() {
} }
By("Verifying logs") By("Verifying logs")
logOutput, err := framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, "portforwardtester") logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
if err != nil { if err != nil {
framework.Failf("Error retrieving pod logs: %v", err) framework.Failf("Error retrieving pod logs: %v", err)
} }
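
framework.GetPodLogs now takes the clientset as its first argument, so the deferred log-dump idiom above reads as follows; dumpTesterLogs and its Logf message are illustrative, since the hunk truncates before the original else branch:

    func dumpTesterLogs(f *framework.Framework, podName string) {
        logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, "portforwardtester")
        if err != nil {
            framework.Logf("Error getting pod log: %v", err)
            return
        }
        framework.Logf("Pod log:\n%s", logs) // illustrative message
    }
    // typical use inside a test: defer dumpTesterLogs(f, pod.Name)
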


@ -22,7 +22,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -34,7 +34,7 @@ type State struct {
Received map[string]int Received map[string]int
} }
func testPreStop(c *client.Client, ns string) { func testPreStop(c clientset.Interface, ns string) {
// This is the server that will receive the preStop notification // This is the server that will receive the preStop notification
podDescr := &api.Pod{ podDescr := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -51,13 +51,13 @@ func testPreStop(c *client.Client, ns string) {
}, },
} }
By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
podDescr, err := c.Pods(ns).Create(podDescr) podDescr, err := c.Core().Pods(ns).Create(podDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { defer func() {
By("Deleting the server pod") By("Deleting the server pod")
c.Pods(ns).Delete(podDescr.Name, nil) c.Core().Pods(ns).Delete(podDescr.Name, nil)
}() }()
By("Waiting for pods to come up.") By("Waiting for pods to come up.")
@ -66,7 +66,7 @@ func testPreStop(c *client.Client, ns string) {
val := "{\"Source\": \"prestop\"}" val := "{\"Source\": \"prestop\"}"
podOut, err := c.Pods(ns).Get(podDescr.Name) podOut, err := c.Core().Pods(ns).Get(podDescr.Name)
framework.ExpectNoError(err, "getting pod info") framework.ExpectNoError(err, "getting pod info")
preStopDescr := &api.Pod{ preStopDescr := &api.Pod{
@ -94,7 +94,7 @@ func testPreStop(c *client.Client, ns string) {
} }
By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
preStopDescr, err = c.Pods(ns).Create(preStopDescr) preStopDescr, err = c.Core().Pods(ns).Create(preStopDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
deletePreStop := true deletePreStop := true
@ -102,7 +102,7 @@ func testPreStop(c *client.Client, ns string) {
defer func() { defer func() {
if deletePreStop { if deletePreStop {
By("Deleting the tester pod") By("Deleting the tester pod")
c.Pods(ns).Delete(preStopDescr.Name, nil) c.Core().Pods(ns).Delete(preStopDescr.Name, nil)
} }
}() }()
@ -111,20 +111,20 @@ func testPreStop(c *client.Client, ns string) {
// Delete the pod with the preStop handler. // Delete the pod with the preStop handler.
By("Deleting pre-stop pod") By("Deleting pre-stop pod")
if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil { if err := c.Core().Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
deletePreStop = false deletePreStop = false
} }
framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))
// Validate that the server received the web poke. // Validate that the server received the web poke.
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c) subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
if err != nil { if err != nil {
return false, err return false, err
} }
var body []byte var body []byte
if subResourceProxyAvailable { if subResourceProxyAvailable {
body, err = c.Get(). body, err = c.Core().RESTClient().Get().
Namespace(ns). Namespace(ns).
Resource("pods"). Resource("pods").
SubResource("proxy"). SubResource("proxy").
@ -132,7 +132,7 @@ func testPreStop(c *client.Client, ns string) {
Suffix("read"). Suffix("read").
DoRaw() DoRaw()
} else { } else {
body, err = c.Get(). body, err = c.Core().RESTClient().Get().
Prefix("proxy"). Prefix("proxy").
Namespace(ns). Namespace(ns).
Resource("pods"). Resource("pods").
@ -163,6 +163,6 @@ var _ = framework.KubeDescribe("PreStop", func() {
f := framework.NewDefaultFramework("prestop") f := framework.NewDefaultFramework("prestop")
It("should call prestop when killing a pod [Conformance]", func() { It("should call prestop when killing a pod [Conformance]", func() {
testPreStop(f.Client, f.Namespace.Name) testPreStop(f.ClientSet, f.Namespace.Name)
}) })
}) })
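
Version gating changes too: framework.ServerVersionGTE now wants the discovery client, which the clientset exposes as c.Discovery(). A sketch of the branch testPreStop uses above; pokeRead is hypothetical:

    func pokeRead(c clientset.Interface, ns, podName string) ([]byte, error) {
        ok, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, c.Discovery())
        if err != nil {
            return nil, err
        }
        req := c.Core().RESTClient().Get()
        if ok {
            // proxy via the pod's proxy subresource
            req = req.Namespace(ns).Resource("pods").SubResource("proxy").Name(podName).Suffix("read")
        } else {
            // fall back to the legacy proxy prefix
            req = req.Prefix("proxy").Namespace(ns).Resource("pods").Name(podName).Suffix("read")
        }
        return req.DoRaw()
    }
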


@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
It("should proxy through a service and a pod [Conformance]", func() { It("should proxy through a service and a pod [Conformance]", func() {
start := time.Now() start := time.Now()
labels := map[string]string{"proxy-service-target": "true"} labels := map[string]string{"proxy-service-target": "true"}
service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{ service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
GenerateName: "proxy-service-", GenerateName: "proxy-service-",
}, },
@ -109,7 +109,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
By("starting an echo server on multiple ports") By("starting an echo server on multiple ports")
pods := []*api.Pod{} pods := []*api.Pod{}
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.Client, Client: f.ClientSet,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
Name: service.Name, Name: service.Name,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
CreatedPods: &pods, CreatedPods: &pods,
} }
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name) defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, cfg.Name)
Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred()) Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
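
testutils.RCConfig now carries the clientset directly in its Client field, and framework.DeleteRCAndPods drops the duplicate client argument. A trimmed sketch of the setup-and-cleanup pair; the name and replica count are illustrative:

    cfg := testutils.RCConfig{
        Client:    f.ClientSet,
        Image:     "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
        Name:      "proxy-target", // illustrative
        Namespace: f.Namespace.Name,
        Replicas:  1,              // illustrative
    }
    Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
    defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, cfg.Name)
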
@ -260,7 +260,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
} }
if len(errs) != 0 { if len(errs) != 0 {
body, err := f.Client.Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw() body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw()
if err != nil { if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err) framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
} else { } else {
@ -281,7 +281,7 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod
// chance of the things we are talking to being confused for an error // chance of the things we are talking to being confused for an error
// that apiserver would have emitted. // that apiserver would have emitted.
start := time.Now() start := time.Now()
body, err = f.Client.Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() body, err = f.ClientSet.Core().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
d = time.Since(start) d = time.Since(start)
if len(body) > 0 { if len(body) > 0 {
framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d) framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)


@ -57,7 +57,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// The source for the Docker container kubernetes/serve_hostname is // The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname // in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating replication controller %s", name)) By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
}, },
@ -86,7 +86,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// Cleanup the replication controller when we are done. // Cleanup the replication controller when we are done.
defer func() { defer func() {
// Resize the replication controller to zero to get rid of pods. // Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil { if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
} }
}() }()
@ -94,7 +94,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// List the pods, making sure we observe all the replicas. // List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas) pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
By("Ensuring each pod is running") By("Ensuring each pod is running")
@ -112,7 +112,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
By("Trying to dial each unique pod") By("Trying to dial each unique pod")
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses) err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
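The cleanup call above also shows the second recurring change in this commit: framework helpers such as DeleteRCAndPods no longer take the (f.Client, f.ClientSet) pair, only the clientset. A hedged sketch of a call site after the migration (the wrapper name cleanupRC is ours):

package example

import "k8s.io/kubernetes/test/e2e/framework"

// cleanupRC mirrors the deferred cleanup above: after this commit the
// framework helper takes a single clientset instead of the old client pair.
func cleanupRC(f *framework.Framework, name string) {
    if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, name); err != nil {
        framework.Logf("Failed to cleanup replication controller %v: %v.", name, err)
    }
}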

View File

@@ -23,7 +23,6 @@ import (
     "k8s.io/kubernetes/pkg/api"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util/sets"
@@ -65,7 +64,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
         // events for the kube-system namespace on failures
         namespaceName := api.NamespaceSystem
         By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
-        events, err := f.Client.Events(namespaceName).List(api.ListOptions{})
+        events, err := f.ClientSet.Core().Events(namespaceName).List(api.ListOptions{})
         Expect(err).NotTo(HaveOccurred())
         for _, e := range events.Items {
@@ -90,32 +89,32 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
     It("each node by ordering clean reboot and ensure they function upon restart", func() {
         // clean shutdown and restart
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
     })
     It("each node by ordering unclean reboot and ensure they function upon restart", func() {
         // unclean shutdown and restart
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
     })
     It("each node by triggering kernel panic and ensure they function upon restart", func() {
         // kernel panic
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
     })
     It("each node by switching off the network interface and ensure they function upon switch on", func() {
         // switch the network interface off for a while to simulate a network outage
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
     })
     It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
         // tell the firewall to drop all inbound packets for a while
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
         // We still accept packets sent from localhost to prevent monit from restarting kubelet.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
             " sleep 120 && sudo iptables -D INPUT -j DROP && sudo iptables -D INPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
     })
@@ -123,14 +122,14 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
         // tell the firewall to drop all outbound packets for a while
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
         // We still accept packets sent to localhost to prevent monit from restarting kubelet.
-        testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
+        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
             " sleep 120 && sudo iptables -D OUTPUT -j DROP && sudo iptables -D OUTPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
     })
 })
-func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) {
+func testReboot(c clientset.Interface, rebootCmd string) {
     // Get all nodes, and kick off the test on each.
-    nodelist := framework.GetReadySchedulableNodesOrDie(cs)
+    nodelist := framework.GetReadySchedulableNodesOrDie(c)
     result := make([]bool, len(nodelist.Items))
     wg := sync.WaitGroup{}
     wg.Add(len(nodelist.Items))
@@ -161,7 +160,7 @@ func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) {
     }
 }
-func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []string, pods []*api.Pod) {
+func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*api.Pod) {
     printFn := func(id, log string, err error, previous bool) {
         prefix := "Retrieving log for container"
         if previous {
@@ -208,7 +207,7 @@ func printStatusAndLogsForNotReadyPods(c *client.Client, ns string, podNames []s
 //
 // It returns true through result only if all of the steps pass; at the first
 // failed step, it will return false through result and not run the rest.
-func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
+func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
     // Setup
     ns := api.NamespaceSystem
     ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
@@ -216,7 +215,7 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
     // Get the node initially.
     framework.Logf("Getting %s", name)
-    node, err := c.Nodes().Get(name)
+    node, err := c.Core().Nodes().Get(name)
     if err != nil {
         framework.Logf("Couldn't get node %s", name)
         return false
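The testReboot, printStatusAndLogsForNotReadyPods, and rebootNode changes above are the commit's core refactor in miniature: the *client.Client parameter becomes clientset.Interface, and each typed call gains an API-group accessor. A before/after sketch under that assumption (the helper name getNode is ours):

package example

import (
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// Before the commit this would have been:
//   func getNode(c *client.Client, name string) (*api.Node, error) {
//       return c.Nodes().Get(name)
//   }
func getNode(c clientset.Interface, name string) (*api.Node, error) {
    // Typed resources now hang off an API-group accessor such as Core().
    return c.Core().Nodes().Get(name)
}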

View File

@@ -57,7 +57,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
     // The source for the Docker container kubernetes/serve_hostname is
     // in contrib/for-demos/serve_hostname
     By(fmt.Sprintf("Creating ReplicaSet %s", name))
-    rs, err := f.Client.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
+    rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
         ObjectMeta: api.ObjectMeta{
             Name: name,
         },
@@ -86,7 +86,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
     // Cleanup the ReplicaSet when we are done.
     defer func() {
         // Resize the ReplicaSet to zero to get rid of pods.
-        if err := framework.DeleteReplicaSet(f.Client, f.ClientSet, f.Namespace.Name, rs.Name); err != nil {
+        if err := framework.DeleteReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name); err != nil {
             framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
         }
     }()
@@ -94,7 +94,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
     // List the pods, making sure we observe all the replicas.
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-    pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas)
+    pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
     Expect(err).NotTo(HaveOccurred())
     By("Ensuring each pod is running")
@@ -113,7 +113,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
     By("Trying to dial each unique pod")
     retryTimeout := 2 * time.Minute
     retryInterval := 5 * time.Second
-    err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
+    err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
     if err != nil {
         framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
     }

View File

@@ -49,21 +49,21 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
     It("should ensure that critical pod is scheduled in case there is no resources available", func() {
         By("reserving all available cpu")
         err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
-        defer framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, "reserve-all-cpu")
+        defer framework.DeleteRCAndPods(f.ClientSet, ns, "reserve-all-cpu")
         framework.ExpectNoError(err)
         By("creating a new instance of DNS and waiting for DNS to be scheduled")
         label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
         listOpts := api.ListOptions{LabelSelector: label}
-        rcs, err := f.Client.ReplicationControllers(api.NamespaceSystem).List(listOpts)
+        rcs, err := f.ClientSet.Core().ReplicationControllers(api.NamespaceSystem).List(listOpts)
         framework.ExpectNoError(err)
         Expect(len(rcs.Items)).Should(Equal(1))
         rc := rcs.Items[0]
         replicas := uint(rc.Spec.Replicas)
-        err = framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true)
-        defer framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true))
+        err = framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true)
+        defer framework.ExpectNoError(framework.ScaleRC(f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true))
         framework.ExpectNoError(err)
     })
 })
@@ -73,10 +73,10 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
     replicas := millicores / 100
     ReserveCpu(f, id, 1, 100)
-    framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, f.Namespace.Name, id, uint(replicas), false))
+    framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.Namespace.Name, id, uint(replicas), false))
     for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
-        pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels)
+        pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
         if err != nil {
             return err
         }

View File

@@ -26,7 +26,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apimachinery/registered"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -160,8 +160,8 @@ func svcByName(name string, port int) *api.Service {
     }
 }
-func newSVCByName(c *client.Client, ns, name string) error {
-    _, err := c.Services(ns).Create(svcByName(name, testPort))
+func newSVCByName(c clientset.Interface, ns, name string) error {
+    _, err := c.Core().Services(ns).Create(svcByName(name, testPort))
     return err
 }
@@ -187,8 +187,8 @@ func podOnNode(podName, nodeName string, image string) *api.Pod {
     }
 }
-func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
-    pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
+func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
+    pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
     if err == nil {
         framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
     } else {
@@ -243,27 +243,27 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str
 }
 // newRCByName creates a replication controller with a selector by name.
-func newRCByName(c *client.Client, ns, name string, replicas int32) (*api.ReplicationController, error) {
+func newRCByName(c clientset.Interface, ns, name string, replicas int32) (*api.ReplicationController, error) {
     By(fmt.Sprintf("creating replication controller %s", name))
-    return c.ReplicationControllers(ns).Create(rcByNamePort(
+    return c.Core().ReplicationControllers(ns).Create(rcByNamePort(
         name, replicas, serveHostnameImage, 9376, api.ProtocolTCP, map[string]string{}))
 }
-func resizeRC(c *client.Client, ns, name string, replicas int32) error {
-    rc, err := c.ReplicationControllers(ns).Get(name)
+func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
+    rc, err := c.Core().ReplicationControllers(ns).Get(name)
     if err != nil {
         return err
     }
     rc.Spec.Replicas = replicas
-    _, err = c.ReplicationControllers(rc.Namespace).Update(rc)
+    _, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
     return err
 }
-func getMaster(c *client.Client) string {
+func getMaster(c clientset.Interface) string {
     master := ""
     switch framework.TestContext.Provider {
     case "gce":
-        eps, err := c.Endpoints(api.NamespaceDefault).Get("kubernetes")
+        eps, err := c.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
         if err != nil {
             framework.Failf("Failed to get kubernetes endpoints: %v", err)
         }
@@ -306,7 +306,7 @@ func getNodeExternalIP(node *api.Node) string {
 // At the end (even in case of errors), the network traffic is brought back to normal.
 // This function executes commands on a node so it will work only for some
 // environments.
-func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) {
+func performTemporaryNetworkFailure(c clientset.Interface, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) {
     host := getNodeExternalIP(node)
     master := getMaster(c)
     By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
@@ -365,13 +365,13 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
 var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
     f := framework.NewDefaultFramework("resize-nodes")
     var systemPodsNo int32
-    var c *client.Client
+    var c clientset.Interface
     var ns string
     ignoreLabels := framework.ImagePullerLabels
     var group string
     BeforeEach(func() {
-        c = f.Client
+        c = f.ClientSet
         ns = f.Namespace.Name
         systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
         Expect(err).NotTo(HaveOccurred())
@@ -507,11 +507,11 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
         By("choose a node with at least one pod - we will block some network traffic on this node")
         label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
         options := api.ListOptions{LabelSelector: label}
-        pods, err := c.Pods(ns).List(options) // list pods after all have been scheduled
+        pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
         Expect(err).NotTo(HaveOccurred())
         nodeName := pods.Items[0].Spec.NodeName
-        node, err := c.Nodes().Get(nodeName)
+        node, err := c.Core().Nodes().Get(nodeName)
         Expect(err).NotTo(HaveOccurred())
         By(fmt.Sprintf("block network traffic from node %s", node.Name))
@@ -535,7 +535,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
         // verify that it is really on the requested node
         {
-            pod, err := c.Pods(ns).Get(additionalPod)
+            pod, err := c.Core().Pods(ns).Get(additionalPod)
             Expect(err).NotTo(HaveOccurred())
             if pod.Spec.NodeName != node.Name {
                 framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
@@ -554,14 +554,14 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
         By("choose a node - we will block all network traffic on this node")
         var podOpts api.ListOptions
         nodeOpts := api.ListOptions{}
-        nodes, err := c.Nodes().List(nodeOpts)
+        nodes, err := c.Core().Nodes().List(nodeOpts)
         Expect(err).NotTo(HaveOccurred())
         framework.FilterNodes(nodes, func(node api.Node) bool {
             if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
                 return false
             }
             podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)}
-            pods, err := c.Pods(api.NamespaceAll).List(podOpts)
+            pods, err := c.Core().Pods(api.NamespaceAll).List(podOpts)
             if err != nil || len(pods.Items) <= 0 {
                 return false
             }
@@ -585,11 +585,12 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
             &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     options.FieldSelector = nodeSelector
-                    return f.Client.Nodes().List(options)
+                    obj, err := f.ClientSet.Core().Nodes().List(options)
+                    return runtime.Object(obj), err
                 },
                 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                     options.FieldSelector = nodeSelector
-                    return f.Client.Nodes().Watch(options)
+                    return f.ClientSet.Core().Nodes().Watch(options)
                 },
             },
             &api.Node{},

View File

@@ -22,7 +22,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -43,13 +43,13 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
         usedResources := api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -57,40 +57,40 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
         usedResources := api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Creating a Service")
         service := newTestServiceForQuota("test-service", api.ServiceTypeClusterIP)
-        service, err = f.Client.Services(f.Namespace.Name).Create(service)
+        service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status captures service creation")
         usedResources = api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
         usedResources[api.ResourceServices] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Deleting a Service")
-        err = f.Client.Services(f.Namespace.Name).Delete(service.Name)
+        err = f.ClientSet.Core().Services(f.Namespace.Name).Delete(service.Name, nil)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status released usage")
         usedResources[api.ResourceServices] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
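Note the extra argument on Delete in the hunk above: the generated clientset's Delete methods take a delete-options argument, so bare Delete(name) calls gain a trailing nil. A minimal sketch (the helper name deleteService is ours):

package example

import (
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// deleteService removes a Service; passing nil for the options keeps the
// old unversioned-client behaviour of default delete semantics.
func deleteService(c clientset.Interface, ns, name string) error {
    return c.Core().Services(ns).Delete(name, nil)
}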
It("should create a ResourceQuota and capture the life of a secret.", func() { It("should create a ResourceQuota and capture the life of a secret.", func() {
By("Discovering how many secrets are in namespace by default") By("Discovering how many secrets are in namespace by default")
secrets, err := f.Client.Secrets(f.Namespace.Name).List(api.ListOptions{}) secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defaultSecrets := fmt.Sprintf("%d", len(secrets.Items)) defaultSecrets := fmt.Sprintf("%d", len(secrets.Items))
hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1) hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1)
@ -99,19 +99,19 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
quotaName := "test-quota" quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName) resourceQuota := newTestResourceQuota(quotaName)
resourceQuota.Spec.Hard[api.ResourceSecrets] = resource.MustParse(hardSecrets) resourceQuota.Spec.Hard[api.ResourceSecrets] = resource.MustParse(hardSecrets)
resourceQuota, err = createResourceQuota(f.Client, f.Namespace.Name, resourceQuota) resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := api.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[api.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a Secret") By("Creating a Secret")
secret := newTestSecretForQuota("test-secret") secret := newTestSecretForQuota("test-secret")
secret, err = f.Client.Secrets(f.Namespace.Name).Create(secret) secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures secret creation") By("Ensuring resource quota status captures secret creation")
@ -119,16 +119,16 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceSecrets] = resource.MustParse(hardSecrets) usedResources[api.ResourceSecrets] = resource.MustParse(hardSecrets)
// we expect there to be two secrets because each namespace will receive // we expect there to be two secrets because each namespace will receive
// a service account token secret by default // a service account token secret by default
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting a secret") By("Deleting a secret")
err = f.Client.Secrets(f.Namespace.Name).Delete(secret.Name) err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@@ -136,13 +136,13 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
         usedResources := api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Creating a Pod that fits quota")
@@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         requests[api.ResourceCPU] = resource.MustParse("500m")
         requests[api.ResourceMemory] = resource.MustParse("252Mi")
         pod := newTestPodForQuota(f, podName, requests, api.ResourceList{})
-        pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
+        pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
         Expect(err).NotTo(HaveOccurred())
         podToUpdate := pod
@@ -160,7 +160,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         usedResources[api.ResourcePods] = resource.MustParse("1")
         usedResources[api.ResourceCPU] = requests[api.ResourceCPU]
         usedResources[api.ResourceMemory] = requests[api.ResourceMemory]
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Not allowing a pod to be created that exceeds remaining quota")
@@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         requests[api.ResourceCPU] = resource.MustParse("600m")
         requests[api.ResourceMemory] = resource.MustParse("100Mi")
         pod = newTestPodForQuota(f, "fail-pod", requests, api.ResourceList{})
-        pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
+        pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
         Expect(err).To(HaveOccurred())
         By("Ensuring a pod cannot update its resource requirements")
@@ -177,15 +177,15 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         requests[api.ResourceCPU] = resource.MustParse("100m")
         requests[api.ResourceMemory] = resource.MustParse("100Mi")
         podToUpdate.Spec.Containers[0].Resources.Requests = requests
-        _, err = f.Client.Pods(f.Namespace.Name).Update(podToUpdate)
+        _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate)
         Expect(err).To(HaveOccurred())
         By("Ensuring attempts to update pod resource requirements did not change quota usage")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Deleting the pod")
-        err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
+        err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status released the pod usage")
@@ -193,7 +193,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         usedResources[api.ResourcePods] = resource.MustParse("0")
         usedResources[api.ResourceCPU] = resource.MustParse("0")
         usedResources[api.ResourceMemory] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
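The pod deletions in this test pass api.NewDeleteOptions(0) rather than nil: a zero grace period asks the apiserver to remove the pod immediately, so the quota-release assertions do not wait out the default termination grace. A small sketch (the helper name deletePodNow is ours):

package example

import (
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// deletePodNow deletes a pod with a zero-second grace period so quota
// usage is released as soon as possible.
func deletePodNow(c clientset.Interface, ns, name string) error {
    return c.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0))
}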
@@ -201,34 +201,34 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
         usedResources := api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Creating a ConfigMap")
         configMap := newTestConfigMapForQuota("test-configmap")
-        configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap)
+        configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status captures configMap creation")
         usedResources = api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
         usedResources[api.ResourceConfigMaps] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Deleting a ConfigMap")
-        err = f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name)
+        err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status released usage")
         usedResources[api.ResourceConfigMaps] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -236,34 +236,34 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
         usedResources := api.ResourceList{}
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
         usedResources[api.ResourceReplicationControllers] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Creating a ReplicationController")
         replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0)
-        replicationController, err = f.Client.ReplicationControllers(f.Namespace.Name).Create(replicationController)
+        replicationController, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(replicationController)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status captures replication controller creation")
         usedResources = api.ResourceList{}
         usedResources[api.ResourceReplicationControllers] = resource.MustParse("1")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Deleting a ReplicationController")
-        err = f.Client.ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil)
+        err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status released usage")
         usedResources[api.ResourceReplicationControllers] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
@@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         By("Creating a ResourceQuota")
         quotaName := "test-quota"
         resourceQuota := newTestResourceQuota(quotaName)
-        resourceQuota, err := createResourceQuota(f.Client, f.Namespace.Name, resourceQuota)
+        resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status is calculated")
@@ -279,51 +279,51 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
         usedResources[api.ResourceQuotas] = resource.MustParse("1")
         usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0")
         usedResources[api.ResourceRequestsStorage] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Creating a PersistentVolumeClaim")
         pvc := newTestPersistentVolumeClaimForQuota("test-claim")
-        pvc, err = f.Client.PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
+        pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status captures persistent volume claim creation")
         usedResources = api.ResourceList{}
         usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
         usedResources[api.ResourceRequestsStorage] = resource.MustParse("1Gi")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
         By("Deleting a PersistentVolumeClaim")
-        err = f.Client.PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name)
+        err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
         Expect(err).NotTo(HaveOccurred())
         By("Ensuring resource quota status released usage")
         usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0")
         usedResources[api.ResourceRequestsStorage] = resource.MustParse("0")
-        err = waitForResourceQuota(f.Client, f.Namespace.Name, quotaName, usedResources)
+        err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
         Expect(err).NotTo(HaveOccurred())
     })
It("should verify ResourceQuota with terminating scopes.", func() { It("should verify ResourceQuota with terminating scopes.", func() {
By("Creating a ResourceQuota with terminating scope") By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating" quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating)) resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
usedResources := api.ResourceList{} usedResources := api.ResourceList{}
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope") By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating" quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating)) resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a long running pod") By("Creating a long running pod")
@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[api.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits) pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage") By("Ensuring resource quota with not terminating scope captures the pod usage")
@ -344,7 +344,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage") By("Ensuring resource quota with terminating scope ignored the pod usage")
@ -353,11 +353,11 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
@ -366,7 +366,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a terminating pod") By("Creating a terminating pod")
@ -374,7 +374,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
pod = newTestPodForQuota(f, podName, requests, limits) pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600) activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage") By("Ensuring resource quota with terminating scope captures the pod usage")
@ -383,7 +383,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage") By("Ensuring resource quota with not terminating scope ignored the pod usage")
@ -392,11 +392,11 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
@ -405,51 +405,51 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
It("should verify ResourceQuota with best effort scope.", func() { It("should verify ResourceQuota with best effort scope.", func() {
By("Creating a ResourceQuota with best effort scope") By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort)) resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
usedResources := api.ResourceList{} usedResources := api.ResourceList{}
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope") By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.Client, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort)) resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod") By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{}) pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage") By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[api.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage") By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod") By("Creating a not best-effort pod")
@ -460,26 +460,26 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[api.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits) pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage") By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[api.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage") By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[api.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.Client, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
@ -532,7 +532,7 @@ func newTestPodForQuota(f *framework.Framework, name string, requests api.Resour
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "pause", Name: "pause",
Image: framework.GetPauseImageName(f.Client), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
@ -633,19 +633,19 @@ func newTestSecretForQuota(name string) *api.Secret {
} }
// createResourceQuota in the specified namespace // createResourceQuota in the specified namespace
func createResourceQuota(c *client.Client, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
return c.ResourceQuotas(namespace).Create(resourceQuota) return c.Core().ResourceQuotas(namespace).Create(resourceQuota)
} }
// deleteResourceQuota with the specified name // deleteResourceQuota with the specified name
func deleteResourceQuota(c *client.Client, namespace, name string) error { func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
return c.ResourceQuotas(namespace).Delete(name) return c.Core().ResourceQuotas(namespace).Delete(name, nil)
} }
// wait for resource quota status to show the expected used resources value // wait for resource quota status to show the expected used resources value
func waitForResourceQuota(c *client.Client, ns, quotaName string, used api.ResourceList) error { func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used api.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.ResourceQuotas(ns).Get(quotaName) resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
if err != nil { if err != nil {
return false, err return false, err
} }
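The three helpers above distill the whole migration: a *client.Client parameter becomes the clientset.Interface abstraction, every resource accessor gains an API-group prefix such as Core(), and Delete picks up an explicit options argument (nil for the defaults). A minimal sketch of the before/after calling convention, assuming the internalclientset import used throughout this commit; getQuota is a hypothetical example, not a helper added here:

    // Old client: resources hang directly off *client.Client:
    //     quota, err := c.ResourceQuotas(ns).Get(name)
    // New clientset: select the typed API group first, then the resource.
    func getQuota(c clientset.Interface, ns, name string) (*api.ResourceQuota, error) {
        return c.Core().ResourceQuotas(ns).Get(name)
    }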


@@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
// check must be identical to that call.
framework.SkipUnlessProviderIs("gce", "gke")
-ps = testutils.NewPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything())
+ps = testutils.NewPodStore(f.ClientSet, api.NamespaceSystem, labels.Everything(), fields.Everything())
})
AfterEach(func() {
@@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
nn := framework.TestContext.CloudConfig.NumNodes
By("ensuring all nodes are ready")
-nodeNamesBefore, err := framework.CheckNodesReady(f.Client, framework.NodeReadyInitialTimeout, nn)
+nodeNamesBefore, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore)
@@ -89,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
podNamesBefore[i] = p.ObjectMeta.Name
}
ns := api.NamespaceSystem
-if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
+if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
Expect(err).NotTo(HaveOccurred())
By("ensuring all nodes are ready after the restart")
-nodeNamesAfter, err := framework.CheckNodesReady(f.Client, framework.RestartNodeReadyAgainTimeout, nn)
+nodeNamesAfter, err := framework.CheckNodesReady(f.ClientSet, framework.RestartNodeReadyAgainTimeout, nn)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter)
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), framework.RestartPodReadyAgainTimeout)
Expect(err).NotTo(HaveOccurred())
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
-if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) {
+if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesAfter, remaining) {
framework.Failf("At least one pod wasn't running and ready after the restart.")
}
})
@@ -156,7 +156,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
-node, err := f.Client.Nodes().Get(name)
+node, err := f.ClientSet.Core().Nodes().Get(name)
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
@@ -178,7 +178,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error {
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
-node, err := f.Client.Nodes().Get(name)
+node, err := f.ClientSet.Core().Nodes().Get(name)
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
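restartNodes treats a node as rebooted once its reported boot ID differs from the one recorded before the restart. The poll closure above is truncated in the hunk; a sketch of its comparison step, assuming the standard node.Status.NodeInfo.BootID field:

    // Poll until the node reports a boot ID different from the pre-reboot one.
    node, err := f.ClientSet.Core().Nodes().Get(name)
    if err != nil {
        return false, fmt.Errorf("error getting node info after reboot: %s", err)
    }
    return node.Status.NodeInfo.BootID != oldBootIDs[name], nil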


@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -53,21 +53,21 @@ var _ = framework.KubeDescribe("ScheduledJob", func() {
It("should schedule multiple jobs concurrently", func() {
By("Creating a scheduledjob")
scheduledJob := newTestScheduledJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, true)
-scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
+scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring more than one job is running at a time")
-err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 2)
+err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two running jobs exists by listing jobs explicitly")
-jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(len(activeJobs) >= 2).To(BeTrue())
By("Removing scheduledjob")
-err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
})
@@ -76,20 +76,20 @@ var _ = framework.KubeDescribe("ScheduledJob", func() {
By("Creating a suspended scheduledjob")
scheduledJob := newTestScheduledJob("suspended", "*/1 * * * ?", batch.AllowConcurrent, true)
scheduledJob.Spec.Suspend = newBool(true)
-scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
+scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring no jobs are scheduled")
-err = waitForNoJobs(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = waitForNoJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).To(HaveOccurred())
By("Ensuring no job exists by listing jobs explicitly")
-jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(jobs.Items).To(HaveLen(0))
By("Removing scheduledjob")
-err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
})
@@ -97,30 +97,30 @@ var _ = framework.KubeDescribe("ScheduledJob", func() {
It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
By("Creating a ForbidConcurrent scheduledjob")
scheduledJob := newTestScheduledJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true)
-scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
+scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a job is scheduled")
-err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 1)
+err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Ensuring exactly one is scheduled")
-scheduledJob, err = getScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+scheduledJob, err = getScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
Expect(scheduledJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
-jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring no more jobs are scheduled")
-err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 2)
+err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 2)
Expect(err).To(HaveOccurred())
By("Removing scheduledjob")
-err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
})
@@ -128,30 +128,30 @@ var _ = framework.KubeDescribe("ScheduledJob", func() {
It("should replace jobs when ReplaceConcurrent", func() {
By("Creating a ReplaceConcurrent scheduledjob")
scheduledJob := newTestScheduledJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent, true)
-scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
+scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a job is scheduled")
-err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 1)
+err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, scheduledJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Ensuring exactly one is scheduled")
-scheduledJob, err = getScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+scheduledJob, err = getScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
Expect(scheduledJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
-jobs, err := f.Client.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring the job is replaced with a new one")
-err = waitForJobReplaced(f.Client, f.Namespace.Name, jobs.Items[0].Name)
+err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Removing scheduledjob")
-err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
})
@@ -159,21 +159,21 @@ var _ = framework.KubeDescribe("ScheduledJob", func() {
It("should not emit unexpected warnings", func() {
By("Creating a scheduledjob")
scheduledJob := newTestScheduledJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, false)
-scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
+scheduledJob, err := createScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
-err = waitForJobsAtLeast(f.Client, f.Namespace.Name, 2)
+err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
Expect(err).NotTo(HaveOccurred())
-err = waitForAnyFinishedJob(f.Client, f.Namespace.Name)
+err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Ensuring no unexpected event has happened")
-err = checkNoUnexpectedEvents(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = checkNoUnexpectedEvents(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
By("Removing scheduledjob")
-err = deleteScheduledJob(f.Client, f.Namespace.Name, scheduledJob.Name)
+err = deleteScheduledJob(f.ClientSet, f.Namespace.Name, scheduledJob.Name)
Expect(err).NotTo(HaveOccurred())
})
})
@@ -228,20 +228,20 @@ func newTestScheduledJob(name, schedule string, concurrencyPolicy batch.Concurre
return sj
}
-func createScheduledJob(c *client.Client, ns string, scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) {
+func createScheduledJob(c clientset.Interface, ns string, scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) {
return c.Batch().ScheduledJobs(ns).Create(scheduledJob)
}
-func getScheduledJob(c *client.Client, ns, name string) (*batch.ScheduledJob, error) {
+func getScheduledJob(c clientset.Interface, ns, name string) (*batch.ScheduledJob, error) {
return c.Batch().ScheduledJobs(ns).Get(name)
}
-func deleteScheduledJob(c *client.Client, ns, name string) error {
+func deleteScheduledJob(c clientset.Interface, ns, name string) error {
return c.Batch().ScheduledJobs(ns).Delete(name, nil)
}
// Wait for at least given amount of active jobs.
-func waitForActiveJobs(c *client.Client, ns, scheduledJobName string, active int) error {
+func waitForActiveJobs(c clientset.Interface, ns, scheduledJobName string, active int) error {
return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
curr, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName)
if err != nil {
@@ -252,7 +252,7 @@ func waitForActiveJobs(c *client.Client, ns, scheduledJobName string, active int
}
// Wait for no jobs to appear.
-func waitForNoJobs(c *client.Client, ns, jobName string) error {
+func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
curr, err := c.Batch().ScheduledJobs(ns).Get(jobName)
if err != nil {
@@ -264,7 +264,7 @@ func waitForNoJobs(c *client.Client, ns, jobName string) error {
}
// Wait for a job to be replaced with a new one.
-func waitForJobReplaced(c *client.Client, ns, previousJobName string) error {
+func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
if err != nil {
@@ -281,7 +281,7 @@ func waitForJobReplaced(c *client.Client, ns, previousJobName string) error {
}
// waitForJobsAtLeast waits for at least a number of jobs to appear.
-func waitForJobsAtLeast(c *client.Client, ns string, atLeast int) error {
+func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
if err != nil {
@@ -292,7 +292,7 @@ func waitForJobsAtLeast(c *client.Client, ns string, atLeast int) error {
}
// waitForAnyFinishedJob waits for any completed job to appear.
-func waitForAnyFinishedJob(c *client.Client, ns string) error {
+func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
if err != nil {
@@ -309,12 +309,12 @@ func waitForAnyFinishedJob(c *client.Client, ns string) error {
// checkNoUnexpectedEvents checks unexpected events didn't happen.
// Currently only "UnexpectedJob" is checked.
-func checkNoUnexpectedEvents(c *client.Client, ns, scheduledJobName string) error {
+func checkNoUnexpectedEvents(c clientset.Interface, ns, scheduledJobName string) error {
sj, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName)
if err != nil {
return fmt.Errorf("error in getting scheduledjob %s/%s: %v", ns, scheduledJobName, err)
}
-events, err := c.Events(ns).Search(sj)
+events, err := c.Core().Events(ns).Search(sj)
if err != nil {
return fmt.Errorf("error in listing events: %s", err)
}
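Every waitFor helper in this file shares one shape: poll the Batch() group at framework.Poll intervals until a condition on the job list holds or scheduledJobTimeout elapses. A hypothetical consolidation of that shape (waitForJobCondition is illustrative only, not a helper in this commit):

    // waitForJobCondition polls the namespace's jobs until cond is satisfied
    // or the scheduledJobTimeout deadline passes.
    func waitForJobCondition(c clientset.Interface, ns string, cond func(*batch.JobList) bool) error {
        return wait.Poll(framework.Poll, scheduledJobTimeout, func() (bool, error) {
            jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
            if err != nil {
                return false, err
            }
            return cond(jobs), nil
        })
    }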


@@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -49,7 +48,6 @@ type pausePodConfig struct {
}
var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
-var c *client.Client
var cs clientset.Interface
var nodeList *api.NodeList
var systemPodsNo int
@@ -60,30 +58,29 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
ignoreLabels := framework.ImagePullerLabels
AfterEach(func() {
-rc, err := c.ReplicationControllers(ns).Get(RCName)
+rc, err := cs.Core().ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
-err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
+err := framework.DeleteRCAndPods(f.ClientSet, ns, RCName)
framework.ExpectNoError(err)
}
})
BeforeEach(func() {
-c = f.Client
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &api.NodeList{}
-framework.WaitForAllNodesHealthy(c, time.Minute)
-masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)
-err := framework.CheckTestingNSDeletedExcept(c, ns)
+framework.WaitForAllNodesHealthy(cs, time.Minute)
+masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
+err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
-systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
+systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
@@ -92,12 +89,12 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}
}
-err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
+err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
-framework.PrintAllKubeletPods(c, node.Name)
+framework.PrintAllKubeletPods(cs, node.Name)
}
})
@@ -117,7 +114,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
totalPodCapacity += podCapacity.Value()
}
-currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes)
+currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
@@ -127,7 +124,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
-framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "maxp",
+framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp",
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
@@ -139,7 +136,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Labels: map[string]string{"name": "additional"},
})
waitForScheduler()
-verifyResult(c, podsNeededForSaturation, 1, ns)
+verifyResult(cs, podsNeededForSaturation, 1, ns)
})
// This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacity.
@@ -157,9 +154,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
nodeMaxCapacity = capacity.MilliValue()
}
}
-framework.WaitForStableCluster(c, masterNodes)
-pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
+framework.WaitForStableCluster(cs, masterNodes)
+pods, err := cs.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
framework.ExpectNoError(err)
for _, pod := range pods.Items {
_, found := nodeToCapacityMap[pod.Spec.NodeName]
@@ -188,7 +185,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
-framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "overcommit",
+framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit",
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
@@ -213,7 +210,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
},
})
waitForScheduler()
-verifyResult(c, podsNeededForSaturation, 1, ns)
+verifyResult(cs, podsNeededForSaturation, 1, ns)
})
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
@@ -222,7 +219,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
-framework.WaitForStableCluster(c, masterNodes)
+framework.WaitForStableCluster(cs, masterNodes)
createPausePod(f, pausePodConfig{
Name: podName,
@@ -233,13 +230,13 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
waitForScheduler()
-verifyResult(c, 0, 1, ns)
+verifyResult(cs, 0, 1, ns)
})
It("validates that a pod with an invalid NodeAffinity is rejected", func() {
By("Trying to launch a pod with an invalid Affinity data.")
podName := "without-label"
-_, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{
+_, err := cs.Core().Pods(ns).Create(initPausePod(f, pausePodConfig{
Name: podName,
Affinity: `{
"nodeAffinity": {
@@ -285,8 +282,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(labelPodName)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -297,7 +294,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
-framework.WaitForStableCluster(c, masterNodes)
+framework.WaitForStableCluster(cs, masterNodes)
createPausePod(f, pausePodConfig{
Name: podName,
@@ -326,7 +323,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Labels: map[string]string{"name": "restricted"},
})
waitForScheduler()
-verifyResult(c, 0, 1, ns)
+verifyResult(cs, 0, 1, ns)
})
// Keep the same steps with the test on NodeSelector,
@@ -369,8 +366,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(labelPodName)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -394,8 +391,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, ""))
-labelPod, err := c.Pods(ns).Get(pod.Name)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, ""))
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -405,7 +402,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() {
By("Trying to launch a pod with an invalid pod Affinity data.")
podName := "without-label-" + string(uuid.NewUUID())
-_, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{
+_, err := cs.Core().Pods(ns).Create(initPausePod(f, pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "without-label"},
Affinity: `{
@@ -439,7 +436,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// Test Nodes does not have any pod, hence it should be impossible to schedule a Pod with pod affinity.
It("validates that Inter-pod-Affinity is respected if not matching", func() {
By("Trying to schedule Pod with nonempty Pod Affinity.")
-framework.WaitForStableCluster(c, masterNodes)
+framework.WaitForStableCluster(cs, masterNodes)
podName := "without-label-" + string(uuid.NewUUID())
createPausePod(f, pausePodConfig{
Name: podName,
@@ -460,7 +457,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
waitForScheduler()
-verifyResult(c, 0, 1, ns)
+verifyResult(cs, 0, 1, ns)
})
// test the pod affinity successful matching scenario.
@@ -500,8 +497,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(labelPodName)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -514,8 +511,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// cannot be scheduled onto it.
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
-defer framework.DeleteRCAndPods(c, f.ClientSet, ns, "host-port")
-podList, err := c.Pods(ns).List(api.ListOptions{})
+defer framework.DeleteRCAndPods(f.ClientSet, ns, "host-port")
+podList, err := cs.Core().Pods(ns).List(api.ListOptions{})
ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
@@ -563,7 +560,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
waitForScheduler()
-verifyResult(c, 3, 1, ns)
+verifyResult(cs, 3, 1, ns)
})
// test the pod affinity successful matching scenario with multiple Label Operators.
@@ -611,8 +608,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(labelPodName)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -636,8 +633,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(pod.Name)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -660,8 +657,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion))
-labelPod, err := c.Pods(ns).Get(pod.Name)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name, pod.ResourceVersion))
+labelPod, err := cs.Core().Pods(ns).Get(pod.Name)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -679,9 +676,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Value: "testing-taint-value",
Effect: api.TaintEffectNoSchedule,
}
-framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint)
-framework.ExpectNodeHasTaint(c, nodeName, testTaint)
-defer framework.RemoveTaintOffNode(c, nodeName, testTaint)
+framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
+framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
+defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
@@ -712,8 +709,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new taint yet. The
// kubelet will then refuse to launch the pod.
-framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, tolerationPodName, pod.ResourceVersion))
-deployedPod, err := c.Pods(ns).Get(tolerationPodName)
+framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName, pod.ResourceVersion))
+deployedPod, err := cs.Core().Pods(ns).Get(tolerationPodName)
framework.ExpectNoError(err)
Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
})
@@ -731,9 +728,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Value: "testing-taint-value",
Effect: api.TaintEffectNoSchedule,
}
-framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint)
-framework.ExpectNodeHasTaint(c, nodeName, testTaint)
-defer framework.RemoveTaintOffNode(c, nodeName, testTaint)
+framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
+framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
+defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
@@ -750,13 +747,13 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
waitForScheduler()
-verifyResult(c, 0, 1, ns)
+verifyResult(cs, 0, 1, ns)
By("Removing taint off the node")
-framework.RemoveTaintOffNode(c, nodeName, testTaint)
+framework.RemoveTaintOffNode(cs, nodeName, testTaint)
waitForScheduler()
-verifyResult(c, 1, 0, ns)
+verifyResult(cs, 1, 0, ns)
})
})
@@ -781,7 +778,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
Containers: []api.Container{
{
Name: podName,
-Image: framework.GetPauseImageName(f.Client),
+Image: framework.GetPauseImageName(f.ClientSet),
},
},
},
@@ -793,15 +790,15 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
}
func createPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
-pod, err := f.Client.Pods(f.Namespace.Name).Create(initPausePod(f, conf))
+pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
framework.ExpectNoError(err)
return pod
}
func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
pod := createPausePod(f, conf)
-framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
-pod, err := f.Client.Pods(f.Namespace.Name).Get(conf.Name)
+framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
+pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name)
framework.ExpectNoError(err)
return pod
}
@@ -814,7 +811,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
-err := f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
+err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName
@@ -915,8 +912,8 @@ func waitForScheduler() {
}
// TODO: upgrade calls in PodAffinity tests when we're able to run them
-func verifyResult(c *client.Client, expectedScheduled int, expectedNotScheduled int, ns string) {
-allPods, err := c.Pods(ns).List(api.ListOptions{})
+func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
+allPods, err := c.Core().Pods(ns).List(api.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
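The hunk above ends before verifyResult's assertions. A sketch of the completed function in the new style; the two Expect lines are inferred from how the tests pass expectedScheduled/expectedNotScheduled counts and are not shown in this commit:

    func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
        allPods, err := c.Core().Pods(ns).List(api.ListOptions{})
        framework.ExpectNoError(err)
        scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
        // Inferred: the partition sizes must match the caller's expectations.
        Expect(len(scheduledPods)).To(Equal(expectedScheduled))
        Expect(len(notScheduledPods)).To(Equal(expectedNotScheduled))
    }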


@@ -167,11 +167,11 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
}
pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
-client := f.Client.Pods(f.Namespace.Name)
+client := f.ClientSet.Core().Pods(f.Namespace.Name)
pod, err := client.Create(pod)
framework.ExpectNoError(err, "Error creating pod %v", pod)
-framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
+framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
testContent := "hello"
testFilePath := mountPath + "/TEST"
@@ -181,7 +181,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
Expect(err).To(BeNil())
Expect(content).To(ContainSubstring(testContent))
-foundPod, err := f.Client.Pods(f.Namespace.Name).Get(pod.Name)
+foundPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name)
Expect(err).NotTo(HaveOccurred())
// Confirm that the file can be accessed from a second


@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/api/service"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@ -80,18 +79,16 @@ var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
var _ = framework.KubeDescribe("Services", func() { var _ = framework.KubeDescribe("Services", func() {
f := framework.NewDefaultFramework("services") f := framework.NewDefaultFramework("services")
var c *client.Client
var cs clientset.Interface var cs clientset.Interface
BeforeEach(func() { BeforeEach(func() {
c = f.Client
cs = f.ClientSet cs = f.ClientSet
}) })
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() { It("should provide secure master service [Conformance]", func() {
_, err := c.Services(api.NamespaceDefault).Get("kubernetes") _, err := cs.Core().Services(api.NamespaceDefault).Get("kubernetes")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -106,7 +103,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating service " + serviceName + " in namespace " + ns) By("creating service " + serviceName + " in namespace " + ns)
defer func() { defer func() {
err := c.Services(ns).Delete(serviceName) err := cs.Core().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}() }()
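Note the extra argument on every Delete in this commit: the generated clientset's Delete takes the object name plus *api.DeleteOptions, where nil means server-side defaults. A minimal sketch of both forms (helper names are illustrative):

// deleteServiceDefault uses the server's default deletion behavior.
func deleteServiceDefault(cs clientset.Interface, ns, name string) error {
    return cs.Core().Services(ns).Delete(name, nil)
}

// deletePodImmediately requests a zero-second grace period, matching the
// api.NewDeleteOptions(0) call used for pause pods earlier in this diff.
func deletePodImmediately(cs clientset.Interface, ns, name string) error {
    return cs.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0))
}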
@ -122,15 +119,15 @@ var _ = framework.KubeDescribe("Services", func() {
}}, }},
}, },
} }
_, err := c.Services(ns).Create(service) _, err := cs.Core().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{})
names := map[string]bool{} names := map[string]bool{}
defer func() { defer func() {
for name := range names { for name := range names {
err := c.Pods(ns).Delete(name, nil) err := cs.Core().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}() }()
@ -138,21 +135,21 @@ var _ = framework.KubeDescribe("Services", func() {
name1 := "pod1" name1 := "pod1"
name2 := "pod2" name2 := "pod2"
createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}}) createPodOrFail(cs, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name1] = true names[name1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}})
createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}}) createPodOrFail(cs, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
names[name2] = true names[name2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
deletePodOrFail(c, ns, name1) deletePodOrFail(cs, ns, name1)
delete(names, name1) delete(names, name1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name2: {80}})
deletePodOrFail(c, ns, name2) deletePodOrFail(cs, ns, name2)
delete(names, name2) delete(names, name2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{})
}) })
It("should serve multiport endpoints from pods [Conformance]", func() { It("should serve multiport endpoints from pods [Conformance]", func() {
@ -162,7 +159,7 @@ var _ = framework.KubeDescribe("Services", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
defer func() { defer func() {
err := c.Services(ns).Delete(serviceName) err := cs.Core().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}() }()
@ -192,16 +189,16 @@ var _ = framework.KubeDescribe("Services", func() {
}, },
}, },
} }
_, err := c.Services(ns).Create(service) _, err := cs.Core().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
port1 := 100 port1 := 100
port2 := 101 port2 := 101
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{})
names := map[string]bool{} names := map[string]bool{}
defer func() { defer func() {
for name := range names { for name := range names {
err := c.Pods(ns).Delete(name, nil) err := cs.Core().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}() }()
@ -222,21 +219,21 @@ var _ = framework.KubeDescribe("Services", func() {
podname1 := "pod1" podname1 := "pod1"
podname2 := "pod2" podname2 := "pod2"
createPodOrFail(c, ns, podname1, labels, containerPorts1) createPodOrFail(cs, ns, podname1, labels, containerPorts1)
names[podname1] = true names[podname1] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname1: {port1}})
createPodOrFail(c, ns, podname2, labels, containerPorts2) createPodOrFail(cs, ns, podname2, labels, containerPorts2)
names[podname2] = true names[podname2] = true
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}})
deletePodOrFail(c, ns, podname1) deletePodOrFail(cs, ns, podname1)
delete(names, podname1) delete(names, podname1)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname2: {port2}})
deletePodOrFail(c, ns, podname2) deletePodOrFail(cs, ns, podname2)
delete(names, podname2) delete(names, podname2)
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{})
}) })
It("should preserve source pod IP for traffic thru service cluster IP", func() { It("should preserve source pod IP for traffic thru service cluster IP", func() {
@ -245,13 +242,13 @@ var _ = framework.KubeDescribe("Services", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
servicePort := 8080 servicePort := 8080
tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
defer func() { defer func() {
framework.Logf("Cleaning up the sourceip test service") framework.Logf("Cleaning up the sourceip test service")
err := c.Services(ns).Delete(serviceName) err := cs.Core().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}() }()
serviceIp := tcpService.Spec.ClusterIP serviceIp := tcpService.Spec.ClusterIP
@ -272,20 +269,20 @@ var _ = framework.KubeDescribe("Services", func() {
jig.launchEchoserverPodOnNode(f, node1.Name, serverPodName) jig.launchEchoserverPodOnNode(f, node1.Name, serverPodName)
defer func() { defer func() {
framework.Logf("Cleaning up the echo server pod") framework.Logf("Cleaning up the echo server pod")
err := c.Pods(ns).Delete(serverPodName, nil) err := cs.Core().Pods(ns).Delete(serverPodName, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}() }()
// Waiting for service to expose endpoint. // Waiting for service to expose endpoint.
validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{serverPodName: {servicePort}}) validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{serverPodName: {servicePort}})
By("Retrieve sourceip from a pod on the same node") By("Retrieve sourceip from a pod on the same node")
sourceIp1, execPodIp1 := execSourceipTest(f, c, ns, node1.Name, serviceIp, servicePort) sourceIp1, execPodIp1 := execSourceipTest(f, cs, ns, node1.Name, serviceIp, servicePort)
By("Verifying the preserved source ip") By("Verifying the preserved source ip")
Expect(sourceIp1).To(Equal(execPodIp1)) Expect(sourceIp1).To(Equal(execPodIp1))
By("Retrieve sourceip from a pod on a different node") By("Retrieve sourceip from a pod on a different node")
sourceIp2, execPodIp2 := execSourceipTest(f, c, ns, node2.Name, serviceIp, servicePort) sourceIp2, execPodIp2 := execSourceipTest(f, cs, ns, node2.Name, serviceIp, servicePort)
By("Verifying the preserved source ip") By("Verifying the preserved source ip")
Expect(sourceIp2).To(Equal(execPodIp2)) Expect(sourceIp2).To(Equal(execPodIp2))
}) })
@ -298,13 +295,13 @@ var _ = framework.KubeDescribe("Services", func() {
numPods, servicePort := 3, 80 numPods, servicePort := 3, 80
By("creating service1 in namespace " + ns) By("creating service1 in namespace " + ns)
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("creating service2 in namespace " + ns) By("creating service2 in namespace " + ns)
podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(f.ClientSet) hosts, err := framework.NodeSSHHosts(cs)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") framework.Failf("No ssh-able nodes")
@ -312,23 +309,23 @@ var _ = framework.KubeDescribe("Services", func() {
host := hosts[0] host := hosts[0]
By("verifying service1 is up") By("verifying service1 is up")
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
By("verifying service2 is up") By("verifying service2 is up")
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
// Stop service 1 and make sure it is gone. // Stop service 1 and make sure it is gone.
By("stopping service1") By("stopping service1")
framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1"))
By("verifying service1 is not up") By("verifying service1 is not up")
framework.ExpectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
By("verifying service2 is still up") By("verifying service2 is still up")
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
// Start another service and verify both are up. // Start another service and verify both are up.
By("creating service3 in namespace " + ns) By("creating service3 in namespace " + ns)
podNames3, svc3IP, err := startServeHostnameService(c, ns, "service3", servicePort, numPods) podNames3, svc3IP, err := startServeHostnameService(cs, ns, "service3", servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if svc2IP == svc3IP { if svc2IP == svc3IP {
@ -336,10 +333,10 @@ var _ = framework.KubeDescribe("Services", func() {
} }
By("verifying service2 is still up") By("verifying service2 is still up")
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
By("verifying service3 is up") By("verifying service3 is up")
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort))
}) })
It("should work after restarting kube-proxy [Disruptive]", func() { It("should work after restarting kube-proxy [Disruptive]", func() {
@ -352,34 +349,34 @@ var _ = framework.KubeDescribe("Services", func() {
svc1 := "service1" svc1 := "service1"
svc2 := "service2" svc2 := "service2"
defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc1)) }() defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc1)) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods) podNames1, svc1IP, err := startServeHostnameService(cs, ns, svc1, servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc2)) }() defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc2)) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods) podNames2, svc2IP, err := startServeHostnameService(cs, ns, svc2, servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP { if svc1IP == svc2IP {
framework.Failf("VIPs conflict: %v", svc1IP) framework.Failf("VIPs conflict: %v", svc1IP)
} }
hosts, err := framework.NodeSSHHosts(f.ClientSet) hosts, err := framework.NodeSSHHosts(cs)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") framework.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
By(fmt.Sprintf("Restarting kube-proxy on %v", host)) By(fmt.Sprintf("Restarting kube-proxy on %v", host))
if err := framework.RestartKubeProxy(host); err != nil { if err := framework.RestartKubeProxy(host); err != nil {
framework.Failf("error restarting kube-proxy: %v", err) framework.Failf("error restarting kube-proxy: %v", err)
} }
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
By("Removing iptable rules") By("Removing iptable rules")
result, err := framework.SSH(` result, err := framework.SSH(`
@ -390,8 +387,8 @@ var _ = framework.KubeDescribe("Services", func() {
framework.LogSSHResult(result) framework.LogSSHResult(result)
framework.Failf("couldn't remove iptable rules: %v", err) framework.Failf("couldn't remove iptable rules: %v", err)
} }
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
}) })
It("should work after restarting apiserver [Disruptive]", func() { It("should work after restarting apiserver [Disruptive]", func() {
@ -401,40 +398,40 @@ var _ = framework.KubeDescribe("Services", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
numPods, servicePort := 3, 80 numPods, servicePort := 3, 80
defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) }() defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1")) }()
podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(f.ClientSet) hosts, err := framework.NodeSSHHosts(cs)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") framework.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
// Restart apiserver // Restart apiserver
By("Restarting apiserver") By("Restarting apiserver")
if err := framework.RestartApiserver(c); err != nil { if err := framework.RestartApiserver(cs.Discovery()); err != nil {
framework.Failf("error restarting apiserver: %v", err) framework.Failf("error restarting apiserver: %v", err)
} }
By("Waiting for apiserver to come up by polling /healthz") By("Waiting for apiserver to come up by polling /healthz")
if err := framework.WaitForApiserverUp(c); err != nil { if err := framework.WaitForApiserverUp(cs); err != nil {
framework.Failf("error while waiting for apiserver up: %v", err) framework.Failf("error while waiting for apiserver up: %v", err)
} }
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
// Create a new service and check if it's not reusing IP. // Create a new service and check if it's not reusing IP.
defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service2")) }() defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service2")) }()
podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP { if svc1IP == svc2IP {
framework.Failf("VIPs conflict: %v", svc1IP) framework.Failf("VIPs conflict: %v", svc1IP)
} }
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
}) })
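framework.RestartApiserver now takes only what it needs, a discovery client, rather than the whole clientset. Discovery() returns the interface for probing the API server itself, which also makes a natural readiness check. A hedged sketch of that idea, assuming the discovery package from the client libraries (this is not the framework's actual implementation):

// apiserverReachable is a sketch of a readiness probe via the discovery
// client: ServerVersion round-trips to the apiserver's /version endpoint.
func apiserverReachable(d discovery.DiscoveryInterface) bool {
    _, err := d.ServerVersion()
    return err == nil
}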
// TODO: Run this test against the userspace proxy and nodes // TODO: Run this test against the userspace proxy and nodes
@ -444,8 +441,8 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName := "nodeport-test" serviceName := "nodeport-test"
ns := f.Namespace.Name ns := f.Namespace.Name
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
nodeIP := pickNodeIP(jig.ClientSet) // for later nodeIP := pickNodeIP(jig.Client) // for later
By("creating service " + serviceName + " with type=NodePort in namespace " + ns) By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) { service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
@ -461,7 +458,7 @@ var _ = framework.KubeDescribe("Services", func() {
jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout) jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout)
By("verifying the node port is locked") By("verifying the node port is locked")
hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
// Even if the node-ip:node-port check above passed, this hostexec pod // Even if the node-ip:node-port check above passed, this hostexec pod
// might fall on a node with a laggy kube-proxy. // might fall on a node with a laggy kube-proxy.
cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
@ -500,8 +497,8 @@ var _ = framework.KubeDescribe("Services", func() {
ns2 := namespacePtr.Name // LB2 in ns2 on UDP ns2 := namespacePtr.Name // LB2 in ns2 on UDP
framework.Logf("namespace for UDP test: %s", ns2) framework.Logf("namespace for UDP test: %s", ns2)
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
nodeIP := pickNodeIP(jig.ClientSet) // for later nodeIP := pickNodeIP(jig.Client) // for later
// Test TCP and UDP Services. Services with the same name in different // Test TCP and UDP Services. Services with the same name in different
// namespaces should get different node ports and load balancers. // namespaces should get different node ports and load balancers.
@ -794,7 +791,7 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName := "nodeports" serviceName := "nodeports"
ns := f.Namespace.Name ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName) t := NewServerTest(cs, ns, serviceName)
defer func() { defer func() {
defer GinkgoRecover() defer GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
@ -844,7 +841,7 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName2 := baseName + "2" serviceName2 := baseName + "2"
ns := f.Namespace.Name ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName1) t := NewServerTest(cs, ns, serviceName1)
defer func() { defer func() {
defer GinkgoRecover() defer GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
@ -896,7 +893,7 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName := "nodeport-range-test" serviceName := "nodeport-range-test"
ns := f.Namespace.Name ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName) t := NewServerTest(cs, ns, serviceName)
defer func() { defer func() {
defer GinkgoRecover() defer GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
@ -935,7 +932,7 @@ var _ = framework.KubeDescribe("Services", func() {
} }
} }
By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
result, err := updateService(c, ns, serviceName, func(s *api.Service) { result, err := updateService(cs, ns, serviceName, func(s *api.Service) {
s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
}) })
if err == nil { if err == nil {
@ -964,7 +961,7 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName := "nodeport-reuse" serviceName := "nodeport-reuse"
ns := f.Namespace.Name ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName) t := NewServerTest(cs, ns, serviceName)
defer func() { defer func() {
defer GinkgoRecover() defer GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
@ -999,7 +996,7 @@ var _ = framework.KubeDescribe("Services", func() {
err = t.DeleteService(serviceName) err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
hostExec := framework.LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
var stdout string var stdout string
if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) {
@ -1026,7 +1023,7 @@ var _ = framework.KubeDescribe("Services", func() {
serviceName := "never-ready" serviceName := "never-ready"
ns := f.Namespace.Name ns := f.Namespace.Name
t := NewServerTest(c, ns, serviceName) t := NewServerTest(cs, ns, serviceName)
defer func() { defer func() {
defer GinkgoRecover() defer GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
@ -1064,7 +1061,7 @@ var _ = framework.KubeDescribe("Services", func() {
svcName := fmt.Sprintf("%v.%v", serviceName, f.Namespace.Name) svcName := fmt.Sprintf("%v.%v", serviceName, f.Namespace.Name)
By("waiting for endpoints of Service with DNS name " + svcName) By("waiting for endpoints of Service with DNS name " + svcName)
execPodName := createExecPodOrFail(f.Client, f.Namespace.Name, "execpod-") execPodName := createExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-")
cmd := fmt.Sprintf("wget -qO- %v", svcName) cmd := fmt.Sprintf("wget -qO- %v", svcName)
var stdout string var stdout string
if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) {
@ -1085,14 +1082,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
f := framework.NewDefaultFramework("esipp") f := framework.NewDefaultFramework("esipp")
loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault
var c *client.Client
var cs clientset.Interface var cs clientset.Interface
BeforeEach(func() { BeforeEach(func() {
// requires cloud load-balancer support - this feature currently supported only on GCE/GKE // requires cloud load-balancer support - this feature currently supported only on GCE/GKE
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
c = f.Client
cs = f.ClientSet cs = f.ClientSet
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber { if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
@ -1102,7 +1097,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
It("should work for type=LoadBalancer [Slow][Feature:ExternalTrafficLocalOnly]", func() { It("should work for type=LoadBalancer [Slow][Feature:ExternalTrafficLocalOnly]", func() {
namespace := f.Namespace.Name namespace := f.Namespace.Name
serviceName := "external-local" serviceName := "external-local"
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true)
healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc))
@ -1120,7 +1115,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
} }
break break
} }
Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}() }()
svcTCPPort := int(svc.Spec.Ports[0].Port) svcTCPPort := int(svc.Spec.Ports[0].Port)
@ -1140,11 +1135,11 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
It("should work for type=NodePort [Slow][Feature:ExternalTrafficLocalOnly]", func() { It("should work for type=NodePort [Slow][Feature:ExternalTrafficLocalOnly]", func() {
namespace := f.Namespace.Name namespace := f.Namespace.Name
serviceName := "external-local" serviceName := "external-local"
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
svc := jig.createOnlyLocalNodePortService(namespace, serviceName, true) svc := jig.createOnlyLocalNodePortService(namespace, serviceName, true)
defer func() { defer func() {
Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}() }()
tcpNodePort := int(svc.Spec.Ports[0].NodePort) tcpNodePort := int(svc.Spec.Ports[0].NodePort)
@ -1166,13 +1161,13 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
It("should only target nodes with endpoints [Slow][Feature:ExternalTrafficLocalOnly]", func() { It("should only target nodes with endpoints [Slow][Feature:ExternalTrafficLocalOnly]", func() {
namespace := f.Namespace.Name namespace := f.Namespace.Name
serviceName := "external-local" serviceName := "external-local"
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
nodes := jig.getNodes(maxNodesForEndpointsTests) nodes := jig.getNodes(maxNodesForEndpointsTests)
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false)
defer func() { defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}() }()
healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc))
@ -1218,20 +1213,20 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
// Make sure the loadbalancer picked up the health check change // Make sure the loadbalancer picked up the health check change
jig.TestReachableHTTP(ingressIP, svcTCPPort, kubeProxyLagTimeout) jig.TestReachableHTTP(ingressIP, svcTCPPort, kubeProxyLagTimeout)
} }
framework.ExpectNoError(framework.DeleteRCAndPods(c, f.ClientSet, namespace, serviceName)) framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, namespace, serviceName))
} }
}) })
It("should work from pods [Slow][Feature:ExternalTrafficLocalOnly]", func() { It("should work from pods [Slow][Feature:ExternalTrafficLocalOnly]", func() {
namespace := f.Namespace.Name namespace := f.Namespace.Name
serviceName := "external-local" serviceName := "external-local"
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
nodes := jig.getNodes(maxNodesForEndpointsTests) nodes := jig.getNodes(maxNodesForEndpointsTests)
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true)
defer func() { defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}() }()
ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
@ -1240,12 +1235,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
podName := "execpod-sourceip" podName := "execpod-sourceip"
By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) By(fmt.Sprintf("Creating %v on node %v", podName, nodeName))
execPodName := createExecPodOnNode(f.Client, namespace, nodeName, podName) execPodName := createExecPodOnNode(f.ClientSet, namespace, nodeName, podName)
defer func() { defer func() {
err := c.Pods(namespace).Delete(execPodName, nil) err := cs.Core().Pods(namespace).Delete(execPodName, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}() }()
execPod, err := f.Client.Pods(namespace).Get(execPodName) execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName)
ExpectNoError(err) ExpectNoError(err)
framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path) framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path)
@ -1269,7 +1264,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
It("should handle updates to source ip annotation [Slow][Feature:ExternalTrafficLocalOnly]", func() { It("should handle updates to source ip annotation [Slow][Feature:ExternalTrafficLocalOnly]", func() {
namespace := f.Namespace.Name namespace := f.Namespace.Name
serviceName := "external-local" serviceName := "external-local"
jig := NewServiceTestJig(c, cs, serviceName) jig := NewServiceTestJig(cs, serviceName)
nodes := jig.getNodes(maxNodesForEndpointsTests) nodes := jig.getNodes(maxNodesForEndpointsTests)
if len(nodes.Items) < 2 { if len(nodes.Items) < 2 {
@ -1279,7 +1274,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true)
defer func() { defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(c.Services(svc.Namespace).Delete(svc.Name)).NotTo(HaveOccurred()) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}() }()
// save the health check node port because it disappears when we lift the annotation. // save the health check node port because it disappears when we lift the annotation.
@ -1375,18 +1370,18 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
// updateService fetches a service, calls the update function on it, // updateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 2 // and then attempts to send the updated service. It retries up to 2
// times in the face of timeouts and conflicts. // times in the face of timeouts and conflicts.
func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) { func updateService(c clientset.Interface, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
var service *api.Service var service *api.Service
var err error var err error
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
service, err = c.Services(namespace).Get(serviceName) service, err = c.Core().Services(namespace).Get(serviceName)
if err != nil { if err != nil {
return service, err return service, err
} }
update(service) update(service)
service, err = c.Services(namespace).Update(service) service, err = c.Core().Services(namespace).Update(service)
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
return service, err return service, err
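updateService above is the classic optimistic-concurrency loop: get, mutate, update, and retry only when the server reports a conflict or timeout. The same shape works against any resource; a sketch for pods under the same clientset (the helper name is illustrative):

// updatePodWithRetry retries a read-modify-write on conflict, mirroring
// the updateService loop above but for pods.
func updatePodWithRetry(c clientset.Interface, ns, name string, mutate func(*api.Pod)) (*api.Pod, error) {
    var pod *api.Pod
    var err error
    for i := 0; i < 3; i++ {
        pod, err = c.Core().Pods(ns).Get(name)
        if err != nil {
            return nil, err
        }
        mutate(pod)
        pod, err = c.Core().Pods(ns).Update(pod)
        if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
            return pod, err
        }
    }
    return nil, fmt.Errorf("too many retries updating pod %q", name)
}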
@ -1430,11 +1425,11 @@ func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
type PortsByPodName map[string][]int type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int type PortsByPodUID map[types.UID][]int
func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
portsByUID := make(PortsByPodUID) portsByUID := make(PortsByPodUID)
for name, portList := range expectedEndpoints { for name, portList := range expectedEndpoints {
pod, err := c.Pods(ns).Get(name) pod, err := c.Core().Pods(ns).Get(name)
if err != nil { if err != nil {
framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
} }
@ -1466,11 +1461,11 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI
} }
} }
func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) { func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) {
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1 i := 1
for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) { for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) {
endpoints, err := c.Endpoints(namespace).Get(serviceName) endpoints, err := c.Core().Endpoints(namespace).Get(serviceName)
if err != nil { if err != nil {
framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue continue
@ -1494,7 +1489,7 @@ func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, ex
i++ i++
} }
if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil { if pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil {
for _, pod := range pods.Items { for _, pod := range pods.Items {
framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
} }
@ -1529,13 +1524,13 @@ func newExecPodSpec(ns, generateName string) *api.Pod {
// createExecPodOrFail creates a simple busybox pod in a sleep loop used as a // createExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands. // vessel for kubectl exec commands.
// Returns the name of the created pod. // Returns the name of the created pod.
func createExecPodOrFail(client *client.Client, ns, generateName string) string { func createExecPodOrFail(client clientset.Interface, ns, generateName string) string {
framework.Logf("Creating new exec pod") framework.Logf("Creating new exec pod")
execPod := newExecPodSpec(ns, generateName) execPod := newExecPodSpec(ns, generateName)
created, err := client.Pods(ns).Create(execPod) created, err := client.Core().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name)
if err != nil { if err != nil {
return false, nil return false, nil
} }
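The polling idiom used here, wait.PollImmediate, runs the condition immediately and then on each interval until it returns true, a non-nil error, or the timeout expires; returning (false, nil) on a failed Get deliberately treats the error as transient and keeps polling. A compact sketch of the same idiom (the helper name is illustrative):

// waitForPodRunningSketch polls until the pod reports Running.
func waitForPodRunningSketch(c clientset.Interface, ns, name string) error {
    return wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
        pod, err := c.Core().Pods(ns).Get(name)
        if err != nil {
            return false, nil // transient; keep polling
        }
        return pod.Status.Phase == api.PodRunning, nil
    })
}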
@ -1547,14 +1542,14 @@ func createExecPodOrFail(client *client.Client, ns, generateName string) string
// createExecPodOnNode launches an exec pod in the given namespace and node // createExecPodOnNode launches an exec pod in the given namespace and node
// and waits until it's Running; the created pod's name is returned. // and waits until it's Running; the created pod's name is returned.
func createExecPodOnNode(client *client.Client, ns, nodeName, generateName string) string { func createExecPodOnNode(client clientset.Interface, ns, nodeName, generateName string) string {
framework.Logf("Creating exec pod %q in namespace %q", generateName, ns) framework.Logf("Creating exec pod %q in namespace %q", generateName, ns)
execPod := newExecPodSpec(ns, generateName) execPod := newExecPodSpec(ns, generateName)
execPod.Spec.NodeName = nodeName execPod.Spec.NodeName = nodeName
created, err := client.Pods(ns).Create(execPod) created, err := client.Core().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name) retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name)
if err != nil { if err != nil {
return false, nil return false, nil
} }
@ -1564,7 +1559,7 @@ func createExecPodOnNode(client *client.Client, ns, nodeName, generateName strin
return created.Name return created.Name
} }
func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) { func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
By(fmt.Sprintf("creating pod %s in namespace %s", name, ns)) By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@ -1584,13 +1579,13 @@ func createPodOrFail(c *client.Client, ns, name string, labels map[string]string
}, },
}, },
} }
_, err := c.Pods(ns).Create(pod) _, err := c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
func deletePodOrFail(c *client.Client, ns, name string) { func deletePodOrFail(c clientset.Interface, ns, name string) {
By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns)) By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
err := c.Pods(ns).Delete(name, nil) err := c.Core().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
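createPodOrFail and deletePodOrFail bracket every endpoint assertion in the tests above. A condensed usage sketch, assuming a service named demo-svc that selects the label below already exists (all names are illustrative):

// endpointLifecycleSketch creates a backing pod, checks that it shows up
// in the service's endpoints, then deletes it and checks it disappears.
func endpointLifecycleSketch(cs clientset.Interface, ns string) {
    labels := map[string]string{"app": "endpoint-demo"}
    createPodOrFail(cs, ns, "demo-pod", labels, []api.ContainerPort{{ContainerPort: 80}})
    validateEndpointsOrFail(cs, ns, "demo-svc", PortsByPodName{"demo-pod": {80}})
    deletePodOrFail(cs, ns, "demo-pod")
    validateEndpointsOrFail(cs, ns, "demo-svc", PortsByPodName{})
}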
@ -1612,8 +1607,8 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
return ips return ips
} }
func getNodePublicIps(cs clientset.Interface) ([]string, error) { func getNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := framework.GetReadySchedulableNodesOrDie(cs) nodes := framework.GetReadySchedulableNodesOrDie(c)
ips := collectAddresses(nodes, api.NodeExternalIP) ips := collectAddresses(nodes, api.NodeExternalIP)
if len(ips) == 0 { if len(ips) == 0 {
@ -1622,8 +1617,8 @@ func getNodePublicIps(cs clientset.Interface) ([]string, error) {
return ips, nil return ips, nil
} }
func pickNodeIP(cs clientset.Interface) string { func pickNodeIP(c clientset.Interface) string {
publicIps, err := getNodePublicIps(cs) publicIps, err := getNodePublicIps(c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(publicIps) == 0 { if len(publicIps) == 0 {
framework.Failf("got unexpected number (%d) of public IPs", len(publicIps)) framework.Failf("got unexpected number (%d) of public IPs", len(publicIps))
@ -1808,11 +1803,11 @@ func testNotReachableUDP(ip string, port int, request string) (bool, error) {
} }
// Creates a replication controller that serves its hostname and a service on top of it. // Creates a replication controller that serves its hostname and a service on top of it.
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) { func startServeHostnameService(c clientset.Interface, ns, name string, port, replicas int) ([]string, string, error) {
podNames := make([]string, replicas) podNames := make([]string, replicas)
By("creating service " + name + " in namespace " + ns) By("creating service " + name + " in namespace " + ns)
_, err := c.Services(ns).Create(&api.Service{ _, err := c.Core().Services(ns).Create(&api.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
}, },
@ -1858,7 +1853,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
} }
sort.StringSlice(podNames).Sort() sort.StringSlice(podNames).Sort()
service, err := c.Services(ns).Get(name) service, err := c.Core().Services(ns).Get(name)
if err != nil { if err != nil {
return podNames, "", err return podNames, "", err
} }
@ -1869,11 +1864,11 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
return podNames, serviceIP, nil return podNames, serviceIP, nil
} }
func stopServeHostnameService(c *client.Client, clientset clientset.Interface, ns, name string) error { func stopServeHostnameService(clientset clientset.Interface, ns, name string) error {
if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil { if err := framework.DeleteRCAndPods(clientset, ns, name); err != nil {
return err return err
} }
if err := c.Services(ns).Delete(name); err != nil { if err := clientset.Core().Services(ns).Delete(name, nil); err != nil {
return err return err
} }
return nil return nil
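After the migration stopServeHostnameService needs only the single clientset, since both framework.DeleteRCAndPods and the service Delete ride on it; that is why the old *client.Client parameter could be dropped. A start/stop usage sketch (service name and counts are illustrative):

// serveHostnameRoundTrip starts the RC+service pair, then tears both down.
func serveHostnameRoundTrip(cs clientset.Interface, ns string) error {
    podNames, ip, err := startServeHostnameService(cs, ns, "demo", 80, 3)
    if err != nil {
        return err
    }
    framework.Logf("serving on %v from pods %v", ip, podNames)
    return stopServeHostnameService(cs, ns, "demo")
}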
@ -1883,7 +1878,7 @@ func stopServeHostnameService(c *client.Client, clientset clientset.Interface, n
// given host and from within a pod. The host is expected to be an SSH-able node // given host and from within a pod. The host is expected to be an SSH-able node
// in the cluster. Each pod in the service is expected to echo its name. These // in the cluster. Each pod in the service is expected to echo its name. These
// names are compared with the given expectedPods list after a sort | uniq. // names are compared with the given expectedPods list after a sort | uniq.
func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
execPodName := createExecPodOrFail(c, ns, "execpod-") execPodName := createExecPodOrFail(c, ns, "execpod-")
defer func() { defer func() {
deletePodOrFail(c, ns, execPodName) deletePodOrFail(c, ns, execPodName)
@ -1959,7 +1954,7 @@ func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPod
return nil return nil
} }
func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP string, servicePort int) error { func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
command := fmt.Sprintf( command := fmt.Sprintf(
"curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort) "curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort)
@ -1994,18 +1989,16 @@ func httpGetNoConnectionPool(url string) (*http.Response, error) {
// A test jig to help with testing. // A test jig to help with testing.
type ServiceTestJig struct { type ServiceTestJig struct {
ID string ID string
Name string Name string
Client *client.Client Client clientset.Interface
ClientSet clientset.Interface Labels map[string]string
Labels map[string]string
} }
// NewServiceTestJig allocates and inits a new ServiceTestJig. // NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client *client.Client, cs clientset.Interface, name string) *ServiceTestJig { func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig {
j := &ServiceTestJig{} j := &ServiceTestJig{}
j.Client = client j.Client = client
j.ClientSet = cs
j.Name = name j.Name = name
j.ID = j.Name + "-" + string(uuid.NewUUID()) j.ID = j.Name + "-" + string(uuid.NewUUID())
j.Labels = map[string]string{"testid": j.ID} j.Labels = map[string]string{"testid": j.ID}
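With the concrete client gone, the jig carries one clientset.Interface and the constructor takes a single client argument. A usage sketch (the service name is illustrative):

// newJigSketch shows the post-migration construction path from a framework fixture.
func newJigSketch(f *framework.Framework) {
    jig := NewServiceTestJig(f.ClientSet, "demo-svc")
    svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, nil)
    jig.SanityCheckService(svc, api.ServiceTypeClusterIP)
}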
@ -2044,7 +2037,7 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s
if tweak != nil { if tweak != nil {
tweak(svc) tweak(svc)
} }
result, err := j.Client.Services(namespace).Create(svc) result, err := j.Client.Core().Services(namespace).Create(svc)
if err != nil { if err != nil {
framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
} }
@ -2059,7 +2052,7 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc
if tweak != nil { if tweak != nil {
tweak(svc) tweak(svc)
} }
result, err := j.Client.Services(namespace).Create(svc) result, err := j.Client.Core().Services(namespace).Create(svc)
if err != nil { if err != nil {
framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err)
} }
@ -2074,7 +2067,7 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc
if tweak != nil { if tweak != nil {
tweak(svc) tweak(svc)
} }
result, err := j.Client.Services(namespace).Create(svc) result, err := j.Client.Core().Services(namespace).Create(svc)
if err != nil { if err != nil {
framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err) framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err)
} }
@ -2145,7 +2138,7 @@ func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceNa
// endpoints of the given Service are running. // endpoints of the given Service are running.
func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string { func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string {
nodes := j.getNodes(maxNodesForEndpointsTests) nodes := j.getNodes(maxNodesForEndpointsTests)
endpoints, err := j.Client.Endpoints(svc.Namespace).Get(svc.Name) endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name)
if err != nil { if err != nil {
framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
} }
@ -2172,7 +2165,7 @@ func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string
// getNodes returns the first maxNodesForTest nodes. Useful in large clusters // getNodes returns the first maxNodesForTest nodes. Useful in large clusters
// where we don't, e.g., want to create an endpoint per node. // where we don't, e.g., want to create an endpoint per node.
func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) { func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) {
nodes = framework.GetReadySchedulableNodesOrDie(j.ClientSet) nodes = framework.GetReadySchedulableNodesOrDie(j.Client)
if len(nodes.Items) <= maxNodesForTest { if len(nodes.Items) <= maxNodesForTest {
maxNodesForTest = len(nodes.Items) maxNodesForTest = len(nodes.Items)
} }
@ -2182,7 +2175,7 @@ func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) {
func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName string) { func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName string) {
err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) { err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) {
endpoints, err := j.Client.Endpoints(namespace).Get(serviceName) endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName)
if err != nil { if err != nil {
framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
return false, nil return false, nil
@ -2244,12 +2237,12 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic
// face of timeouts and conflicts. // face of timeouts and conflicts.
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) { func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) {
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
service, err := j.Client.Services(namespace).Get(name) service, err := j.Client.Core().Services(namespace).Get(name)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) return nil, fmt.Errorf("Failed to get Service %q: %v", name, err)
} }
update(service) update(service)
service, err = j.Client.Services(namespace).Update(service) service, err = j.Client.Core().Services(namespace).Update(service)
if err == nil { if err == nil {
return service, nil return service, nil
} }
@ -2298,7 +2291,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo
var service *api.Service var service *api.Service
framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
pollFunc := func() (bool, error) { pollFunc := func() (bool, error) {
svc, err := j.Client.Services(namespace).Get(name) svc, err := j.Client.Core().Services(namespace).Get(name)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -2325,7 +2318,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string
var service *api.Service var service *api.Service
framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name) framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
pollFunc := func() (bool, error) { pollFunc := func() (bool, error) {
svc, err := j.Client.Services(namespace).Get(name) svc, err := j.Client.Core().Services(namespace).Get(name)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -2453,7 +2446,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.Replicat
if tweak != nil { if tweak != nil {
tweak(rc) tweak(rc)
} }
result, err := j.Client.ReplicationControllers(namespace).Create(rc) result, err := j.Client.Core().ReplicationControllers(namespace).Create(rc)
if err != nil { if err != nil {
framework.Failf("Failed to created RC %q: %v", rc.Name, err) framework.Failf("Failed to created RC %q: %v", rc.Name, err)
} }
@ -2474,7 +2467,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := api.ListOptions{LabelSelector: label} options := api.ListOptions{LabelSelector: label}
pods, err := j.Client.Pods(namespace).List(options) pods, err := j.Client.Core().Pods(namespace).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2507,7 +2500,7 @@ func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error
type ServiceTestFixture struct { type ServiceTestFixture struct {
ServiceName string ServiceName string
Namespace string Namespace string
Client *client.Client Client clientset.Interface
TestId string TestId string
Labels map[string]string Labels map[string]string
@ -2518,7 +2511,7 @@ type ServiceTestFixture struct {
image string image string
} }
func NewServerTest(client *client.Client, namespace string, serviceName string) *ServiceTestFixture { func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture {
t := &ServiceTestFixture{} t := &ServiceTestFixture{}
t.Client = client t.Client = client
t.Namespace = namespace t.Namespace = namespace
@@ -2571,7 +2564,7 @@ func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *api.ReplicationC
 // createRC creates a replication controller and records it for cleanup.
 func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) {
-	rc, err := t.Client.ReplicationControllers(t.Namespace).Create(rc)
+	rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc)
 	if err == nil {
 		t.rcs[rc.Name] = true
 	}
@@ -2580,7 +2573,7 @@ func (t *ServiceTestFixture) createRC(rc *api.Repli
 // Create a service, and record it for cleanup
 func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) {
-	result, err := t.Client.Services(t.Namespace).Create(service)
+	result, err := t.Client.Core().Services(t.Namespace).Create(service)
 	if err == nil {
 		t.services[service.Name] = true
 	}
@@ -2589,7 +2582,7 @@ func (t *ServiceTestFixture) CreateService(service *api.Service,
 // Delete a service, and remove it from the cleanup list
 func (t *ServiceTestFixture) DeleteService(serviceName string) error {
-	err := t.Client.Services(t.Namespace).Delete(serviceName)
+	err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil)
 	if err == nil {
 		delete(t.services, serviceName)
 	}
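Note the extra argument: the generated clientset's Delete methods take (name, *api.DeleteOptions), and passing nil keeps the old default server-side behavior. A one-function sketch of the migrated call (the helper name is an assumption):

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// deleteService mirrors the migrated call site: nil DeleteOptions preserves
// the defaults that the old single-argument Delete implied.
func deleteService(cs clientset.Interface, namespace, name string) error {
	return cs.Core().Services(namespace).Delete(name, nil)
}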
@@ -2601,25 +2594,25 @@ func (t *ServiceTestFixture) Cleanup() []error {
 	for rcName := range t.rcs {
 		By("stopping RC " + rcName + " in namespace " + t.Namespace)
 		// First, resize the RC to 0.
-		old, err := t.Client.ReplicationControllers(t.Namespace).Get(rcName)
+		old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName)
 		if err != nil {
 			errs = append(errs, err)
 		}
 		old.Spec.Replicas = 0
-		if _, err := t.Client.ReplicationControllers(t.Namespace).Update(old); err != nil {
+		if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil {
 			errs = append(errs, err)
 		}
 		// TODO(mikedanese): Wait.
 		// Then, delete the RC altogether.
-		if err := t.Client.ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil {
+		if err := t.Client.Core().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil {
 			errs = append(errs, err)
 		}
 	}
 
 	for serviceName := range t.services {
 		By("deleting service " + serviceName + " in namespace " + t.Namespace)
-		err := t.Client.Services(t.Namespace).Delete(serviceName)
+		err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil)
 		if err != nil {
 			errs = append(errs, err)
 		}
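The cleanup loop above follows a scale-down-then-delete sequence. Here is that sequence as a small sketch against clientset.Interface, with the test's error aggregation simplified to an early return (the helper name is an assumption):

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

func stopRC(cs clientset.Interface, namespace, name string) error {
	rc, err := cs.Core().ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}
	// Resize to 0 first so the pods terminate before the controller goes away.
	rc.Spec.Replicas = 0
	if _, err := cs.Core().ReplicationControllers(namespace).Update(rc); err != nil {
		return err
	}
	// Then delete the RC itself; nil options keep the defaults.
	return cs.Core().ReplicationControllers(namespace).Delete(name, nil)
}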
@@ -2657,22 +2650,22 @@ func (j *ServiceTestJig) launchEchoserverPodOnNode(f *framework.Framework, nodeN
 	pod := newEchoServerPodSpec(podName)
 	pod.Spec.NodeName = nodeName
 	pod.ObjectMeta.Labels = j.Labels
-	podClient := f.Client.Pods(f.Namespace.Name)
+	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
 	_, err := podClient.Create(pod)
 	framework.ExpectNoError(err)
 	framework.ExpectNoError(f.WaitForPodRunning(podName))
 	framework.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name)
 }
 
-func execSourceipTest(f *framework.Framework, c *client.Client, ns, nodeName, serviceIP string, servicePort int) (string, string) {
+func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) {
 	framework.Logf("Creating an exec pod on node %v", nodeName)
-	execPodName := createExecPodOnNode(f.Client, ns, nodeName, fmt.Sprintf("execpod-sourceip-%s", nodeName))
+	execPodName := createExecPodOnNode(f.ClientSet, ns, nodeName, fmt.Sprintf("execpod-sourceip-%s", nodeName))
 	defer func() {
 		framework.Logf("Cleaning up the exec pod")
-		err := c.Pods(ns).Delete(execPodName, nil)
+		err := c.Core().Pods(ns).Delete(execPodName, nil)
 		Expect(err).NotTo(HaveOccurred())
 	}()
-	execPod, err := f.Client.Pods(ns).Get(execPodName)
+	execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName)
 	ExpectNoError(err)
 	var stdout string

View File

@@ -42,7 +42,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		var secrets []api.ObjectReference
 		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
 			By("waiting for a single token reference")
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			if apierrors.IsNotFound(err) {
 				framework.Logf("default service account was not found")
 				return false, nil
@@ -68,19 +68,19 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		{
 			By("ensuring the single token reference persists")
 			time.Sleep(2 * time.Second)
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			framework.ExpectNoError(err)
 			Expect(sa.Secrets).To(Equal(secrets))
 		}
 
 		// delete the referenced secret
 		By("deleting the service account token")
-		framework.ExpectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name))
+		framework.ExpectNoError(f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))
 		// wait for the referenced secret to be removed, and another one autocreated
 		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
 			By("waiting for a new token reference")
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			if err != nil {
 				framework.Logf("error getting default service account: %v", err)
 				return false, err
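The same get-and-poll shape recurs throughout this file. A compact, self-contained sketch of it against clientset.Interface; the helper name and the timeout values are illustrative assumptions:

package example

import (
	"time"

	apierrors "k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForTokenRef polls the default service account until it carries exactly
// one token reference, treating NotFound as "keep waiting".
func waitForTokenRef(cs clientset.Interface, namespace string) error {
	return wait.Poll(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
		sa, err := cs.Core().ServiceAccounts(namespace).Get("default")
		if apierrors.IsNotFound(err) {
			return false, nil // not provisioned yet
		}
		if err != nil {
			return false, err
		}
		return len(sa.Secrets) == 1, nil
	})
}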
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		{
 			By("ensuring the single token reference persists")
 			time.Sleep(2 * time.Second)
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			framework.ExpectNoError(err)
 			Expect(sa.Secrets).To(Equal(secrets))
 		}
@@ -114,17 +114,17 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		// delete the reference from the service account
 		By("deleting the reference to the service account token")
 		{
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			framework.ExpectNoError(err)
 			sa.Secrets = nil
-			_, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa)
+			_, updateErr := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Update(sa)
 			framework.ExpectNoError(updateErr)
 		}
 
 		// wait for another one to be autocreated
 		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
 			By("waiting for a new token to be created and added")
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			if err != nil {
 				framework.Logf("error getting default service account: %v", err)
 				return false, err
@@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		{
 			By("ensuring the single token reference persists")
 			time.Sleep(2 * time.Second)
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			framework.ExpectNoError(err)
 			Expect(sa.Secrets).To(Equal(secrets))
 		}
@@ -159,7 +159,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 		// Standard get, update retry loop
 		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
 			By("getting the auto-created API token")
-			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
+			sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
 			if apierrors.IsNotFound(err) {
 				framework.Logf("default service account was not found")
 				return false, nil
@@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 				return false, nil
 			}
 			for _, secretRef := range sa.Secrets {
-				secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name)
+				secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name)
 				if err != nil {
 					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
 					continue
@@ -214,7 +214,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
 			},
 		}
 
-		supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.Client)
+		supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
 		if supportsTokenNamespace {
 			pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
 				Name: "namespace-test",

View File

@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -66,9 +67,9 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
 	)
 
 	// Turn off rate limiting--it interferes with our measurements.
-	oldThrottle := f.Client.RESTClient.Throttle
-	f.Client.RESTClient.Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
-	defer func() { f.Client.RESTClient.Throttle = oldThrottle }()
+	oldThrottle := f.ClientSet.Core().RESTClient().GetRateLimiter()
+	f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
+	defer func() { f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }()
 	failing := sets.NewString()
 	d, err := runServiceLatencies(f, parallelTrials, totalTrials)
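The rate-limiter swap deserves a note: the REST client handed back by Core().RESTClient() is an interface exposing only GetRateLimiter(), so writing the Throttle field back requires asserting down to the concrete *restclient.RESTClient, exactly as the hunk does. A sketch of that pattern with a checked assertion added for safety (the checked form and helper name are assumptions; the test asserts unconditionally):

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

// withoutThrottling disables client-side rate limiting and returns a restore
// func; ok is false when the client is not a concrete *restclient.RESTClient
// (e.g. a fake in unit tests).
func withoutThrottling(cs clientset.Interface) (restore func(), ok bool) {
	rc, ok := cs.Core().RESTClient().(*restclient.RESTClient)
	if !ok {
		return func() {}, false
	}
	old := rc.Throttle
	rc.Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
	return func() { rc.Throttle = old }, true
}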
@@ -117,8 +118,8 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
 func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
 	cfg := testutils.RCConfig{
-		Client:    f.Client,
-		Image:     framework.GetPauseImageName(f.Client),
+		Client:    f.ClientSet,
+		Image:     framework.GetPauseImageName(f.ClientSet),
 		Name:      "svc-latency-rc",
 		Namespace: f.Namespace.Name,
 		Replicas:  1,
@@ -277,10 +278,11 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
 	_, controller := cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return f.Client.Endpoints(f.Namespace.Name).List(options)
+				obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
+				return runtime.Object(obj), err
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return f.Client.Endpoints(f.Namespace.Name).Watch(options)
+				return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
 			},
 		},
 		&api.Endpoints{},
@@ -325,7 +327,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie
 		},
 	}
 	startTime := time.Now()
-	gotSvc, err := f.Client.Services(f.Namespace.Name).Create(svc)
+	gotSvc, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc)
 	if err != nil {
 		return 0, err
 	}

Some files were not shown because too many files have changed in this diff.