use core client with explicit version globally

Kevin
2017-10-25 23:54:32 +08:00
parent d945927077
commit 4c8539cece
190 changed files with 921 additions and 921 deletions
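
The change is purely mechanical: every call made through the unversioned Core() group accessor of the generated clientset now goes through the version-explicit CoreV1() accessor, with the rest of each call chain untouched. A minimal sketch of the pattern follows, assuming the client-go API of this era (List without a context argument); the listPods helper and the import aliases are illustrative, not taken from this diff:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listPods is a hypothetical helper showing the call shape touched by this
// commit: only the group accessor changes, from c.Core() to c.CoreV1().
func listPods(c clientset.Interface, ns string) (*v1.PodList, error) {
	// Before: pods, err := c.Core().Pods(ns).List(metav1.ListOptions{})
	// After, with the API group version named explicitly:
	return c.CoreV1().Pods(ns).List(metav1.ListOptions{})
}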

@@ -94,7 +94,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
}
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
-pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
+pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
if err == nil {
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else {
@@ -142,14 +142,14 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodeOpts := metav1.ListOptions{}
-nodes, err := c.Core().Nodes().List(nodeOpts)
+nodes, err := c.CoreV1().Nodes().List(nodeOpts)
Expect(err).NotTo(HaveOccurred())
framework.FilterNodes(nodes, func(node v1.Node) bool {
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
-pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
+pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
if err != nil || len(pods.Items) <= 0 {
return false
}
@@ -173,12 +173,12 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
-obj, err := f.ClientSet.Core().Nodes().List(options)
+obj, err := f.ClientSet.CoreV1().Nodes().List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector.String()
-return f.ClientSet.Core().Nodes().Watch(options)
+return f.ClientSet.CoreV1().Nodes().Watch(options)
},
},
&v1.Node{},
@@ -245,11 +245,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
-pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
+pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
-node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -287,7 +287,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
// verify that it is really on the requested node
{
-pod, err := c.Core().Pods(ns).Get(additionalPod, metav1.GetOptions{})
+pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name {
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
@@ -310,11 +310,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
-pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
+pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
-node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -351,7 +351,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
framework.SkipUnlessProviderIs("gce", "gke")
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
-_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
+_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
framework.ExpectNoError(err)
c = f.ClientSet
ns = f.Namespace.Name
@@ -392,7 +392,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
pod := pst.GetPodList(ps).Items[0]
-node, err := c.Core().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
+node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
@@ -432,11 +432,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
By("choose a node with at least one pod - we will block some network traffic on this node")
options := metav1.ListOptions{LabelSelector: label.String()}
-pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
+pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
-node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
@@ -482,7 +482,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
-pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
+pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
if err != nil || len(pods.Items) <= 0 {
return false
}
@@ -496,7 +496,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
}
-pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
+pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
podTolerationTimes := map[string]time.Duration{}
// This test doesn't add tolerations by itself, but because they may be present in the cluster
@@ -545,12 +545,12 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
-obj, err := f.ClientSet.Core().Nodes().List(options)
+obj, err := f.ClientSet.CoreV1().Nodes().List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector.String()
-return f.ClientSet.Core().Nodes().Watch(options)
+return f.ClientSet.CoreV1().Nodes().Watch(options)
},
},
&v1.Node{},
@@ -601,7 +601,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
sleepTime := maxTolerationTime + 20*time.Second
By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
time.Sleep(sleepTime)
-pods, err = c.Core().Pods(v1.NamespaceAll).List(podOpts)
+pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
seenRunning := []string{}
for _, pod := range pods.Items {