GetOptions - fix tests
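This is a mechanical follow-up to the clientset change that added an explicit
options argument to Get: every Get(name) call in the integration tests now
passes metav1.GetOptions{}, and each affected file gains the metav1 import.
A minimal before/after sketch of the pattern (the clientset c, namespace ns,
and pod name here are illustrative, not taken from the diff):

	import metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"

	// Before: the name was the only argument.
	//   pod, err := c.Core().Pods(ns).Get("some-pod")
	// After: callers pass GetOptions explicitly; the zero value keeps
	// the old read behavior.
	pod, err := c.Core().Pods(ns).Get("some-pod", metav1.GetOptions{})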
@@ -173,7 +173,7 @@ func TestAtomicPut(t *testing.T) {
 		go func(l, v string) {
 			defer wg.Done()
 			for {
-				tmpRC, err := rcs.Get(rc.Name)
+				tmpRC, err := rcs.Get(rc.Name, metav1.GetOptions{})
 				if err != nil {
 					t.Errorf("Error getting atomicRC: %v", err)
 					continue
@@ -199,7 +199,7 @@ func TestAtomicPut(t *testing.T) {
 		}(label, value)
 	}
 	wg.Wait()
-	rc, err = rcs.Get(rc.Name)
+	rc, err = rcs.Get(rc.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed getting atomicRC after writers are complete: %v", err)
 	}
@@ -281,7 +281,7 @@ func TestPatch(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
 		}
-		pod, err = pods.Get(name)
+		pod, err = pods.Get(name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Failed getting patchpod: %v", err)
 		}
@@ -294,7 +294,7 @@ func TestPatch(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
 		}
-		pod, err = pods.Get(name)
+		pod, err = pods.Get(name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Failed getting patchpod: %v", err)
 		}
@@ -307,7 +307,7 @@ func TestPatch(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
 		}
-		pod, err = pods.Get(name)
+		pod, err = pods.Get(name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Failed getting patchpod: %v", err)
 		}
@@ -712,7 +712,7 @@ func TestMultiWatch(t *testing.T) {
 	for i := 0; i < watcherCount; i++ {
 		go func(i int) {
 			name := fmt.Sprintf("multi-watch-%v", i)
-			pod, err := client.Core().Pods(ns.Name).Get(name)
+			pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
 			if err != nil {
 				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
 			}

@@ -213,7 +213,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {

 	for i := 0; i < 10; i++ {
 		podName := fmt.Sprintf(podNameFormat, i)
-		_, err := clientSet.Core().Pods(ns.Name).Get(podName)
+		_, err := clientSet.Core().Pods(ns.Name).Get(podName, metav1.GetOptions{})
 		if !errors.IsNotFound(err) {
 			t.Errorf("Pod %q is expected to be evicted", podName)
 		}
@@ -243,7 +243,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN

 func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
 	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
-		pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName)
+		pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}

@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/batch"
 	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
 	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
 	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
@@ -452,7 +453,7 @@ func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interf
 	if err != nil {
 		return nil, err
 	}
-	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
+	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}

@@ -232,10 +232,10 @@ func TestCascadingDeletion(t *testing.T) {
 		t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
 	}
 	// checks the garbage collect doesn't delete pods it shouldn't delete.
-	if _, err := podClient.Get(independentPodName); err != nil {
+	if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil {
 		t.Fatal(err)
 	}
-	if _, err := podClient.Get(oneValidOwnerPodName); err != nil {
+	if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
 		t.Fatal(err)
 	}
 }

@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/restclient"
@@ -407,7 +408,7 @@ func TestMasterService(t *testing.T) {
 			}
 		}
 		if found {
-			ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
+			ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
 			if err != nil {
 				return false, nil
 			}
@@ -451,7 +452,7 @@ func TestServiceAlloc(t *testing.T) {

 	// Wait until the default "kubernetes" service is created.
 	if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
-		_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes")
+		_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
 		if err != nil && !errors.IsNotFound(err) {
 			return false, err
 		}
@@ -595,7 +596,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 				break
 			}

-			n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node))
+			n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
 			if err != nil {
 				fmt.Printf("[%d] error after %d: %v\n", node, i, err)
 				break

@@ -245,7 +245,7 @@ func TestAdoption(t *testing.T) {
 		waitToObservePods(t, podInformer, 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-			updatedPod, err := podClient.Get(pod.Name)
+			updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -280,7 +280,7 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
 func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
 	rsClient := clientSet.Extensions().ReplicaSets(ns)
 	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-		updatedRS, err := rsClient.Get(rs.Name)
+		updatedRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -371,7 +371,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 		t.Fatal(err)
 	}
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}
@@ -414,7 +414,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}

@@ -243,7 +243,7 @@ func TestAdoption(t *testing.T) {
 		waitToObservePods(t, podInformer, 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-			updatedPod, err := podClient.Get(pod.Name)
+			updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -278,7 +278,7 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic
 func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
 	rcClient := clientSet.Core().ReplicationControllers(ns)
 	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
-		updatedRC, err := rcClient.Get(rc.Name)
+		updatedRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -367,7 +367,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 		t.Fatal(err)
 	}
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}
@@ -409,7 +409,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	pod2, err = podClient.Get(pod2.Name)
+	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
 	}

@@ -298,7 +298,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		t.Fatalf("Failed to schedule pod: %v", err)
 	}

-	if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name); err != nil {
+	if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{}); err != nil {
 		t.Fatalf("Failed to get pod: %v", err)
 	} else if myPod.Spec.NodeName != "machine3" {
 		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)

@@ -76,7 +76,7 @@ func TestUnschedulableNodes(t *testing.T) {

 func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		pod, err := c.Core().Pods(podNamespace).Get(podName)
+		pod, err := c.Core().Pods(podNamespace).Get(podName, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -268,7 +268,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
 		}

 		// Apply the schedulable modification to the node, and wait for the reflection
-		schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name)
+		schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Failed to get node: %v", err)
 		}

@@ -35,6 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/auth/authenticator"
 	"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
 	"k8s.io/kubernetes/pkg/auth/authorizer"
@@ -139,7 +140,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
 	}

 	// Trigger creation of a new referenced token
-	serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name)
+	serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -435,13 +436,13 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie

 func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
 	if !shouldWait {
-		return c.Core().ServiceAccounts(ns).Get(name)
+		return c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 	}

 	var user *v1.ServiceAccount
 	var err error
 	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
-		user, err = c.Core().ServiceAccounts(ns).Get(name)
+		user, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -458,7 +459,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 	token := ""

 	findToken := func() (bool, error) {
-		user, err := c.Core().ServiceAccounts(ns).Get(name)
+		user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return false, nil
 		}
@@ -467,7 +468,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 		}

 		for _, ref := range user.Secrets {
-			secret, err := c.Core().Secrets(ns).Get(ref.Name)
+			secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{})
 			if errors.IsNotFound(err) {
 				continue
 			}

@@ -21,6 +21,7 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/api/errors"
+	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -48,7 +49,7 @@ var Code503 = map[int]bool{503: true}
 // WaitForPodToDisappear polls the API server if the pod has been deleted.
 func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
-		_, err := podClient.Get(podName)
+		_, err := podClient.Get(podName, metav1.GetOptions{})
 		if err == nil {
 			return false, nil
 		} else {

@@ -275,7 +275,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
 	glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")

-	pv, err = testClient.PersistentVolumes().Get(pv.Name)
+	pv, err = testClient.PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -347,14 +347,14 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")

-	pv, err := testClient.PersistentVolumes().Get("pv-false")
+	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true")
+	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -446,14 +446,14 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 	t.Log("claim bound")

-	pv, err := testClient.PersistentVolumes().Get("pv-false")
+	pv, err := testClient.PersistentVolumes().Get("pv-false", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("False PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-true")
+	pv, err = testClient.PersistentVolumes().Get("pv-true", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -520,7 +520,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 	// only one PV is bound
 	bound := 0
 	for i := 0; i < maxPVs; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -614,7 +614,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		// Modify PV
 		i := rand.Intn(objCount)
 		name := "pv-" + strconv.Itoa(i)
-		pv, err := testClient.PersistentVolumes().Get(name)
+		pv, err := testClient.PersistentVolumes().Get(name, metav1.GetOptions{})
 		if err != nil {
 			// Silently ignore error, the PV may have be already deleted
 			// or not exists yet.
@@ -638,7 +638,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		// Modify PVC
 		i := rand.Intn(objCount)
 		name := "pvc-" + strconv.Itoa(i)
-		pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name)
+		pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name, metav1.GetOptions{})
 		if err != nil {
 			// Silently ignore error, the PVC may have be already
 			// deleted or not exists yet.
@@ -693,7 +693,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {

 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -702,7 +702,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)

-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
+		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -822,7 +822,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {

 	// check that everything is bound to something
 	for i := 0; i < objCount; i++ {
-		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
+		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pv: %v", err)
 		}
@@ -831,7 +831,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 		}
 		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)

-		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
+		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Unexpected error getting pvc: %v", err)
 		}
@@ -991,14 +991,14 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	t.Log("claim bound")

 	// only RWM PV is bound
-	pv, err := testClient.PersistentVolumes().Get("pv-rwo")
+	pv, err := testClient.PersistentVolumes().Get("pv-rwo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
 	if pv.Spec.ClaimRef != nil {
 		t.Fatalf("ReadWriteOnce PV shouldn't be bound")
 	}
-	pv, err = testClient.PersistentVolumes().Get("pv-rwm")
+	pv, err = testClient.PersistentVolumes().Get("pv-rwm", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error getting pv: %v", err)
 	}
@@ -1021,7 +1021,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {

 func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
 	// Check if the volume is already in requested phase
-	volume, err := client.Core().PersistentVolumes().Get(pvName)
+	volume, err := client.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 	if err == nil && volume.Status.Phase == phase {
 		return
 	}
@@ -1042,7 +1042,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w

 func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
 	// Check if the claim is already in requested phase
-	claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
+	claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
 	if err == nil && claim.Status.Phase == phase {
 		return
 	}