test/integration
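The diff below converts these integration tests from the internal api types and the internalclientset to the versioned v1 types and the generated release_1_5 clientset. As a reading aid, here is a minimal Go sketch of the pattern the commit applies throughout; the package paths and calls are taken from the diff itself, while the wrapper function, its arguments, and the error handling are illustrative rather than part of the commit:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// listPods is a hypothetical helper showing the migrated client construction:
// GroupVersion now comes from v1.GroupName instead of api.GroupName, and list
// options are the versioned v1.ListOptions instead of api.ListOptions.
func listPods(host, namespace string) error {
	c := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          host,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
	})

	pods, err := c.Core().Pods(namespace).List(v1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
	return nil
}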
@@ -32,7 +32,7 @@ import (
  "k8s.io/kubernetes/pkg/api/unversioned"
  "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  "k8s.io/kubernetes/pkg/labels"
  "k8s.io/kubernetes/pkg/runtime"
@@ -47,7 +47,7 @@ func TestClient(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  ns := framework.CreateTestingNamespace("client", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)
@@ -60,7 +60,7 @@ func TestClient(t *testing.T) {
  t.Errorf("expected %#v, got %#v", e, a)
  }

- pods, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
+ pods, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
  if err != nil {
  t.Fatalf("unexpected error: %v", err)
  }
@@ -69,13 +69,13 @@ func TestClient(t *testing.T) {
  }

  // get a validation error
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  GenerateName: "test",
  Namespace: ns.Name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "test",
  },
@@ -99,7 +99,7 @@ func TestClient(t *testing.T) {
  }

  // pod is shown, but not scheduled
- pods, err = client.Core().Pods(ns.Name).List(api.ListOptions{})
+ pods, err = client.Core().Pods(ns.Name).List(v1.ListOptions{})
  if err != nil {
  t.Fatalf("unexpected error: %v", err)
  }
@@ -119,35 +119,35 @@ func TestAtomicPut(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  ns := framework.CreateTestingNamespace("atomic-put", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- rcBody := api.ReplicationController{
+ rcBody := v1.ReplicationController{
  TypeMeta: unversioned.TypeMeta{
  APIVersion: c.Core().RESTClient().APIVersion().String(),
  },
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
  Name: "atomicrc",
  Namespace: ns.Name,
  Labels: map[string]string{
  "name": "atomicrc",
  },
  },
- Spec: api.ReplicationControllerSpec{
- Replicas: 0,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int32) *int32 { return &i }(0),
  Selector: map[string]string{
  "foo": "bar",
  },
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
  Labels: map[string]string{
  "foo": "bar",
  },
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {Name: "name", Image: "image"},
  },
  },
@@ -211,24 +211,24 @@ func TestPatch(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  ns := framework.CreateTestingNamespace("patch", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

  name := "patchpod"
  resource := "pods"
- podBody := api.Pod{
+ podBody := v1.Pod{
  TypeMeta: unversioned.TypeMeta{
  APIVersion: c.Core().RESTClient().APIVersion().String(),
  },
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
  Name: name,
  Namespace: ns.Name,
  Labels: map[string]string{},
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {Name: "name", Image: "image"},
  },
  },
@@ -320,20 +320,20 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  ns := framework.CreateTestingNamespace("patch-with-create", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- endpointTemplate := &api.Endpoints{
- ObjectMeta: api.ObjectMeta{
+ endpointTemplate := &v1.Endpoints{
+ ObjectMeta: v1.ObjectMeta{
  Name: "patchendpoint",
  Namespace: ns.Name,
  },
- Subsets: []api.EndpointSubset{
+ Subsets: []v1.EndpointSubset{
  {
- Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
- Ports: []api.EndpointPort{{Port: 80, Protocol: api.ProtocolTCP}},
+ Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
+ Ports: []v1.EndpointPort{{Port: 80, Protocol: v1.ProtocolTCP}},
  },
  },
  }
@@ -431,7 +431,7 @@ func TestAPIVersions(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  clientVersion := c.Core().RESTClient().APIVersion().String()
  g, err := c.Discovery().ServerGroups()
@@ -456,16 +456,16 @@ func TestSingleWatch(t *testing.T) {
  ns := framework.CreateTestingNamespace("single-watch", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

- mkEvent := func(i int) *api.Event {
+ mkEvent := func(i int) *v1.Event {
  name := fmt.Sprintf("event-%v", i)
- return &api.Event{
- ObjectMeta: api.ObjectMeta{
+ return &v1.Event{
+ ObjectMeta: v1.ObjectMeta{
  Namespace: ns.Name,
  Name: name,
  },
- InvolvedObject: api.ObjectReference{
+ InvolvedObject: v1.ObjectReference{
  Namespace: ns.Name,
  Name: name,
  },
@@ -517,7 +517,7 @@ func TestSingleWatch(t *testing.T) {
  t.Errorf("Wanted %v, got %v", e, a)
  }
  switch o := got.Object.(type) {
- case *api.Event:
+ case *v1.Event:
  if e, a := "event-9", o.Name; e != a {
  t.Errorf("Wanted %v, got %v", e, a)
  }
@@ -541,16 +541,16 @@ func TestMultiWatch(t *testing.T) {
  ns := framework.CreateTestingNamespace("multi-watch", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

- dummyEvent := func(i int) *api.Event {
+ dummyEvent := func(i int) *v1.Event {
  name := fmt.Sprintf("unrelated-%v", i)
- return &api.Event{
- ObjectMeta: api.ObjectMeta{
+ return &v1.Event{
+ ObjectMeta: v1.ObjectMeta{
  Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
  Namespace: ns.Name,
  },
- InvolvedObject: api.ObjectReference{
+ InvolvedObject: v1.ObjectReference{
  Name: name,
  Namespace: ns.Name,
  },
@@ -570,13 +570,13 @@ func TestMultiWatch(t *testing.T) {
  for i := 0; i < watcherCount; i++ {
  watchesStarted.Add(1)
  name := fmt.Sprintf("multi-watch-%v", i)
- got, err := client.Core().Pods(ns.Name).Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: name,
  Labels: labels.Set{"watchlabel": name},
  },
- Spec: api.PodSpec{
- Containers: []api.Container{{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
  Name: "pause",
  Image: e2e.GetPauseImageName(client),
  }},
@@ -587,8 +587,8 @@ func TestMultiWatch(t *testing.T) {
  t.Fatalf("Couldn't make %v: %v", name, err)
  }
  go func(name, rv string) {
- options := api.ListOptions{
- LabelSelector: labels.Set{"watchlabel": name}.AsSelector(),
+ options := v1.ListOptions{
+ LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
  ResourceVersion: rv,
  }
  w, err := client.Core().Pods(ns.Name).Watch(options)
@@ -677,12 +677,12 @@ func TestMultiWatch(t *testing.T) {
  return
  }
  name := fmt.Sprintf("unrelated-%v", i)
- _, err := client.Core().Pods(ns.Name).Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ _, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
  Name: "nothing",
  Image: e2e.GetPauseImageName(client),
  }},
@@ -741,16 +741,16 @@ func TestMultiWatch(t *testing.T) {
  }

  func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
- podBody := api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podBody := v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "selflinktest",
  Namespace: namespace,
  Labels: map[string]string{
  "name": "selflinktest",
  },
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {Name: "name", Image: "image"},
  },
  },
@@ -763,7 +763,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s
  t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
  }

- podList, err := c.Core().Pods(namespace).List(api.ListOptions{})
+ podList, err := c.Core().Pods(namespace).List(v1.ListOptions{})
  if err != nil {
  t.Errorf("Failed listing pods: %v", err)
  }
@@ -797,7 +797,7 @@ func TestSelfLinkOnNamespace(t *testing.T) {
  ns := framework.CreateTestingNamespace("selflink", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  runSelfLinkTestOnNamespace(t, c, ns.Name)
  }
@@ -22,12 +22,11 @@ import (
  "reflect"
  "testing"

- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/testapi"
  "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  "k8s.io/kubernetes/pkg/client/typed/dynamic"
  "k8s.io/kubernetes/pkg/runtime"
@@ -41,7 +40,7 @@ func TestDynamicClient(t *testing.T) {
  ns := framework.CreateTestingNamespace("dynamic-client", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)

- gv := &registered.GroupOrDie(api.GroupName).GroupVersion
+ gv := &registered.GroupOrDie(v1.GroupName).GroupVersion
  config := &restclient.Config{
  Host: s.URL,
  ContentConfig: restclient.ContentConfig{GroupVersion: gv},
@@ -73,12 +72,12 @@ func TestDynamicClient(t *testing.T) {
  }

  // Create a Pod with the normal client
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  GenerateName: "test",
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "test",
  Image: "test-image",
@@ -108,7 +107,7 @@ func TestDynamicClient(t *testing.T) {

  got, err := unstructuredToPod(unstructuredList.Items[0])
  if err != nil {
- t.Fatalf("unexpected error converting Unstructured to api.Pod: %v", err)
+ t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
  }

  if !reflect.DeepEqual(actual, got) {
@@ -123,7 +122,7 @@ func TestDynamicClient(t *testing.T) {

  got, err = unstructuredToPod(unstruct)
  if err != nil {
- t.Fatalf("unexpected error converting Unstructured to api.Pod: %v", err)
+ t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
  }

  if !reflect.DeepEqual(actual, got) {
@@ -136,7 +135,7 @@ func TestDynamicClient(t *testing.T) {
  t.Fatalf("unexpected error when deleting pod: %v", err)
  }

- list, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
+ list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
  if err != nil {
  t.Fatalf("unexpected error when listing pods: %v", err)
  }
@@ -146,12 +145,14 @@ func TestDynamicClient(t *testing.T) {
  }
  }

- func unstructuredToPod(obj *runtime.Unstructured) (*api.Pod, error) {
+ func unstructuredToPod(obj *runtime.Unstructured) (*v1.Pod, error) {
  json, err := runtime.Encode(runtime.UnstructuredJSONScheme, obj)
  if err != nil {
  return nil, err
  }
- pod := new(api.Pod)
+ pod := new(v1.Pod)
  err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
+ pod.Kind = ""
+ pod.APIVersion = ""
  return pod, err
  }
@@ -23,9 +23,9 @@ package configmap
  import (
  "testing"

- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  "k8s.io/kubernetes/test/integration"
  "k8s.io/kubernetes/test/integration/framework"
@@ -36,7 +36,7 @@ func TestConfigMap(t *testing.T) {
  _, s := framework.RunAMaster(nil)
  defer s.Close()

- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

  ns := framework.CreateTestingNamespace("config-map", s, t)
  defer framework.DeleteTestingNamespace(ns, s, t)
@@ -44,9 +44,9 @@ func TestConfigMap(t *testing.T) {
  DoTestConfigMap(t, client, ns)
  }

- func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace) {
- cfg := api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
+ cfg := v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
  Name: "configmap",
  Namespace: ns.Name,
  },
@@ -62,22 +62,22 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
  }
  defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)

- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
  Name: "XXX",
  Namespace: ns.Name,
  },
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
  {
  Name: "fake-name",
  Image: "fakeimage",
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
  {
  Name: "CONFIG_DATA_1",
- ValueFrom: &api.EnvVarSource{
- ConfigMapKeyRef: &api.ConfigMapKeySelector{
- LocalObjectReference: api.LocalObjectReference{
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
  Name: "configmap",
  },
  Key: "data-1",
@@ -86,9 +86,9 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
  },
  {
  Name: "CONFIG_DATA_2",
- ValueFrom: &api.EnvVarSource{
- ConfigMapKeyRef: &api.ConfigMapKeySelector{
- LocalObjectReference: api.LocalObjectReference{
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
  Name: "configmap",
  },
  Key: "data-2",
@@ -96,9 +96,9 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
  },
  }, {
  Name: "CONFIG_DATA_3",
- ValueFrom: &api.EnvVarSource{
- ConfigMapKeyRef: &api.ConfigMapKeySelector{
- LocalObjectReference: api.LocalObjectReference{
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
  Name: "configmap",
  },
  Key: "data-3",
@@ -32,20 +32,21 @@ import (
  "k8s.io/kubernetes/pkg/api/unversioned"
  "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/apps"
- "k8s.io/kubernetes/pkg/apis/autoscaling"
+ apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+ autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
  "k8s.io/kubernetes/pkg/apis/batch"
- "k8s.io/kubernetes/pkg/apis/certificates"
- "k8s.io/kubernetes/pkg/apis/extensions"
- "k8s.io/kubernetes/pkg/apis/policy"
- "k8s.io/kubernetes/pkg/apis/rbac"
- "k8s.io/kubernetes/pkg/apis/storage"
+ certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+ rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
+ storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
  "k8s.io/kubernetes/pkg/apiserver/authenticator"
  authauthenticator "k8s.io/kubernetes/pkg/auth/authenticator"
  authauthorizer "k8s.io/kubernetes/pkg/auth/authorizer"
  authorizerunion "k8s.io/kubernetes/pkg/auth/authorizer/union"
  "k8s.io/kubernetes/pkg/auth/user"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
  "k8s.io/kubernetes/pkg/client/record"
  "k8s.io/kubernetes/pkg/client/restclient"
@@ -116,7 +117,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
  // TODO: Allow callers to pipe through a different master url and create a client/start components using it.
  glog.Infof("Master %+v", s.URL)
  // TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
- clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
+ clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
  rcStopCh := make(chan struct{})
  controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)

@@ -265,7 +266,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
  if masterConfig.EnableCoreControllers {
  // TODO Once /healthz is updated for posthooks, we'll wait for good health
  coreClient := coreclient.NewForConfigOrDie(&cfg)
- svcWatch, err := coreClient.Services(api.NamespaceDefault).Watch(v1.ListOptions{})
+ svcWatch, err := coreClient.Services(v1.NamespaceDefault).Watch(v1.ListOptions{})
  if err != nil {
  glog.Fatal(err)
  }
@@ -309,7 +310,7 @@ func NewMasterConfig() *master.Config {

  storageFactory := genericapiserver.NewDefaultStorageFactory(config, runtime.ContentTypeJSON, ns, genericapiserver.NewDefaultResourceEncodingConfig(), master.DefaultAPIResourceConfigSource())
  storageFactory.SetSerializer(
- unversioned.GroupResource{Group: api.GroupName, Resource: genericapiserver.AllResources},
+ unversioned.GroupResource{Group: v1.GroupName, Resource: genericapiserver.AllResources},
  "",
  ns)
  storageFactory.SetSerializer(
@@ -389,29 +390,29 @@ func (m *MasterComponents) Stop(apiServer, rcManager bool) {
  }
  }

- func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *api.Namespace {
+ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
  // TODO: Create a namespace with a given basename.
  // Currently we neither create the namespace nor delete all its contents at the end.
  // But as long as tests are not using the same namespaces, this should work fine.
- return &api.Namespace{
- ObjectMeta: api.ObjectMeta{
+ return &v1.Namespace{
+ ObjectMeta: v1.ObjectMeta{
  // TODO: Once we start creating namespaces, switch to GenerateName.
  Name: baseName,
  },
  }
  }

- func DeleteTestingNamespace(ns *api.Namespace, apiserver *httptest.Server, t *testing.T) {
+ func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
  // TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
  }

  // RCFromManifest reads a .json file and returns the rc in it.
- func RCFromManifest(fileName string) *api.ReplicationController {
+ func RCFromManifest(fileName string) *v1.ReplicationController {
  data, err := ioutil.ReadFile(fileName)
  if err != nil {
  glog.Fatalf("Unexpected error reading rc manifest %v", err)
  }
- var controller api.ReplicationController
+ var controller v1.ReplicationController
  if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
  glog.Fatalf("Unexpected error reading rc manifest %v", err)
  }
@@ -419,7 +420,7 @@ func RCFromManifest(fileName string) *api.ReplicationController {
  }

  // StopRC stops the rc via kubectl's stop library
- func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
+ func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
  reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
  if err != nil || reaper == nil {
  return err
@@ -432,7 +433,7 @@ func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error
  }

  // ScaleRC scales the given rc to the given replicas.
- func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
+ func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
  scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
  if err != nil {
  return nil, err
@@ -17,9 +17,9 @@ limitations under the License.
  package framework

  import (
- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/resource"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  e2eframework "k8s.io/kubernetes/test/e2e/framework"
  testutils "k8s.io/kubernetes/test/utils"

@@ -51,23 +51,23 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
  }

  glog.Infof("Making %d nodes", numNodes)
- baseNode := &api.Node{
- ObjectMeta: api.ObjectMeta{
+ baseNode := &v1.Node{
+ ObjectMeta: v1.ObjectMeta{
  GenerateName: p.nodeNamePrefix,
  },
- Spec: api.NodeSpec{
+ Spec: v1.NodeSpec{
  // TODO: investigate why this is needed.
  ExternalID: "foo",
  },
- Status: api.NodeStatus{
- Capacity: api.ResourceList{
- api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
- api.ResourceCPU: resource.MustParse("4"),
- api.ResourceMemory: resource.MustParse("32Gi"),
+ Status: v1.NodeStatus{
+ Capacity: v1.ResourceList{
+ v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+ v1.ResourceCPU: resource.MustParse("4"),
+ v1.ResourceMemory: resource.MustParse("32Gi"),
  },
- Phase: api.NodeRunning,
- Conditions: []api.NodeCondition{
- {Type: api.NodeReady, Status: api.ConditionTrue},
+ Phase: v1.NodeRunning,
+ Conditions: []v1.NodeCondition{
+ {Type: v1.NodeReady, Status: v1.ConditionTrue},
  },
  },
  }
@@ -95,7 +95,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
  func (p *IntegrationTestNodePreparer) CleanupNodes() error {
  nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
  for i := range nodes.Items {
- if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
+ if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
  glog.Errorf("Error while deleting Node: %v", err)
  }
  }
@@ -25,9 +25,9 @@ import (
  "net/http/httptest"
  "testing"

- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  "k8s.io/kubernetes/test/integration/framework"

@@ -108,8 +108,8 @@ func TestApiserverMetrics(t *testing.T) {

  // Make a request to the apiserver to ensure there's at least one data point
  // for the metrics we're expecting -- otherwise, they won't be exported.
- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
- if _, err := client.Core().Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
+ if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil {
  t.Fatalf("unexpected error getting pods: %v", err)
  }

@@ -21,10 +21,10 @@ import (

  "github.com/stretchr/testify/assert"
  "golang.org/x/net/context"
- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/testapi"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  "k8s.io/kubernetes/pkg/genericapiserver"
  etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
@@ -38,14 +38,14 @@ func TestIgnoreClusterName(t *testing.T) {
  _, s := framework.RunAMaster(config)
  defer s.Close()

- client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+ client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
  etcdClient := framework.NewEtcdClient()
  etcdStorage := etcdstorage.NewEtcdStorage(etcdClient, testapi.Default.Codec(),
  prefix+"/namespaces/", false, etcdtest.DeserializationCacheSize)
  ctx := context.TODO()

- ns := api.Namespace{
- ObjectMeta: api.ObjectMeta{
+ ns := v1.Namespace{
+ ObjectMeta: v1.ObjectMeta{
  Name: "test-namespace",
  ClusterName: "cluster-name-to-ignore",
  },
@@ -55,7 +55,7 @@ func TestIgnoreClusterName(t *testing.T) {
  assert.Equal(t, ns.Name, nsNew.Name)
  assert.Empty(t, nsNew.ClusterName)

- nsEtcd := api.Namespace{}
+ nsEtcd := v1.Namespace{}
  err = etcdStorage.Get(ctx, ns.Name, &nsEtcd, false)
  assert.Nil(t, err)
  assert.Equal(t, ns.Name, nsEtcd.Name)
@@ -66,7 +66,7 @@ func TestIgnoreClusterName(t *testing.T) {
  assert.Equal(t, ns.Name, nsNew.Name)
  assert.Empty(t, nsNew.ClusterName)

- nsEtcd = api.Namespace{}
+ nsEtcd = v1.Namespace{}
  err = etcdStorage.Get(ctx, ns.Name, &nsEtcd, false)
  assert.Nil(t, err)
  assert.Equal(t, ns.Name, nsEtcd.Name)
@@ -27,13 +27,13 @@ import (
  "testing"
  "time"

- "k8s.io/kubernetes/pkg/api"
  "k8s.io/kubernetes/pkg/api/resource"
  "k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
  "k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/storage"
- storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
+ storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
  "k8s.io/kubernetes/pkg/client/restclient"
  fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
  persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
@@ -122,15 +122,15 @@ func TestPersistentVolumeRecycler(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  ctrl.Run(stopCh)
  defer close(stopCh)

  // This PV will be claimed, released, and recycled.
- pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRecycle)
- pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
+ pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

  _, err := testClient.PersistentVolumes().Create(pv)
  if err != nil {
@@ -145,9 +145,9 @@ func TestPersistentVolumeRecycler(t *testing.T) {
  glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")

  // wait until the controller pairs the volume and claim
- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
  glog.V(2).Infof("TestPersistentVolumeRecycler pv bound")
- waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+ waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
  glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound")

  // deleting a claim releases the volume, after which it can be recycled
@@ -156,9 +156,9 @@ func TestPersistentVolumeRecycler(t *testing.T) {
  }
  glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted")

- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeReleased)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
  glog.V(2).Infof("TestPersistentVolumeRecycler pv released")
- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeAvailable)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeAvailable)
  glog.V(2).Infof("TestPersistentVolumeRecycler pv available")
  }

@@ -176,15 +176,15 @@ func TestPersistentVolumeDeleter(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  ctrl.Run(stopCh)
  defer close(stopCh)

  // This PV will be claimed, released, and deleted.
- pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimDelete)
- pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
+ pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

  _, err := testClient.PersistentVolumes().Create(pv)
  if err != nil {
@@ -196,9 +196,9 @@ func TestPersistentVolumeDeleter(t *testing.T) {
  t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
  }
  glog.V(2).Infof("TestPersistentVolumeDeleter pvc created")
- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
  glog.V(2).Infof("TestPersistentVolumeDeleter pv bound")
- waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+ waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
  glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound")

  // deleting a claim releases the volume, after which it can be recycled
@@ -207,7 +207,7 @@ func TestPersistentVolumeDeleter(t *testing.T) {
  }
  glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted")

- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeReleased)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
  glog.V(2).Infof("TestPersistentVolumeDeleter pv released")

  for {
@@ -235,22 +235,22 @@ func TestPersistentVolumeBindRace(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  ctrl.Run(stopCh)
  defer close(stopCh)

- pv := createPV("fake-pv-race", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
- pvc := createPVC("fake-pvc-race", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
+ pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
  counter := 0
  maxClaims := 100
- claims := []*api.PersistentVolumeClaim{}
+ claims := []*v1.PersistentVolumeClaim{}
  for counter <= maxClaims {
  counter += 1
  clone, _ := conversion.NewCloner().DeepCopy(pvc)
- newPvc, _ := clone.(*api.PersistentVolumeClaim)
- newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
+ newPvc, _ := clone.(*v1.PersistentVolumeClaim)
+ newPvc.ObjectMeta = v1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
  claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
  if err != nil {
  t.Fatalf("Error creating newPvc: %v", err)
@@ -262,7 +262,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
  // putting a bind manually on a pv should only match the claim it is bound to
  rand.Seed(time.Now().Unix())
  claim := claims[rand.Intn(maxClaims-1)]
- claimRef, err := api.GetReference(claim)
+ claimRef, err := v1.GetReference(claim)
  if err != nil {
  t.Fatalf("Unexpected error getting claimRef: %v", err)
  }
@@ -275,9 +275,9 @@ func TestPersistentVolumeBindRace(t *testing.T) {
  }
  glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name)

- waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+ waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
  glog.V(2).Infof("TestPersistentVolumeBindRace pv bound")
- waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
+ waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
  glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")

  pv, err = testClient.PersistentVolumes().Get(pv.Name)
@@ -306,7 +306,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  controller.Run(stopCh)
@@ -314,8 +314,8 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {

  var (
  err error
- modes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
- reclaim = api.PersistentVolumeReclaimRetain
+ modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
+ reclaim = v1.PersistentVolumeReclaimRetain

  pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
  pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
@@ -347,9 +347,9 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
  }
  t.Log("claim created")

- waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
  t.Log("volume bound")
- waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+ waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
  t.Log("claim bound")

  pv, err := testClient.PersistentVolumes().Get("pv-false")
@@ -386,7 +386,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  controller.Run(stopCh)
@@ -394,8 +394,8 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {

  var (
  err error
- modes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
- reclaim = api.PersistentVolumeReclaimRetain
+ modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
+ reclaim = v1.PersistentVolumeReclaimRetain

  pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
  pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
@@ -446,9 +446,9 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
  }
  t.Log("claim created")

- waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
  t.Log("volume bound")
- waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+ waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
  t.Log("claim bound")

  pv, err := testClient.PersistentVolumes().Get("pv-false")
@@ -485,28 +485,28 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  stopCh := make(chan struct{})
  controller.Run(stopCh)
  defer close(stopCh)

  maxPVs := getObjectCount()
- pvs := make([]*api.PersistentVolume, maxPVs)
+ pvs := make([]*v1.PersistentVolume, maxPVs)
  for i := 0; i < maxPVs; i++ {
  // This PV will be claimed, released, and deleted
  pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), strconv.Itoa(i)+"G",
- []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
+ []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
  }

- pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

  for i := 0; i < maxPVs; i++ {
  _, err := testClient.PersistentVolumes().Create(pvs[i])
  if err != nil {
  t.Errorf("Failed to create PersistentVolume %d: %v", i, err)
  }
- waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, api.VolumeAvailable)
+ waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeAvailable)
  }
  t.Log("volumes created")

@@ -517,9 +517,9 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
  t.Log("claim created")

  // wait until the binder pairs the claim with a volume
- waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
  t.Log("volume bound")
- waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+ waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
  t.Log("claim bound")

  // only one PV is bound
@@ -533,14 +533,14 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
  continue
  }
  // found a bounded PV
- p := pv.Spec.Capacity[api.ResourceStorage]
+ p := pv.Spec.Capacity[v1.ResourceStorage]
  pvCap := p.Value()
  expectedCap := resource.MustParse(strconv.Itoa(maxPVs/2) + "G")
  expectedCapVal := expectedCap.Value()
  if pv.Spec.ClaimRef.Name != pvc.Name || pvCap != expectedCapVal {
  t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
  }
- t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[api.ResourceStorage])
+ t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage])
  bound += 1
  }
  t.Log("volumes checked")
@@ -555,7 +555,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
  }
  t.Log("claim deleted")

- waitForAnyPersistentVolumePhase(watchPV, api.VolumeReleased)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
  t.Log("volumes released")
  }

@@ -574,20 +574,20 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {

  // NOTE: This test cannot run in parallel, because it is creating and deleting
  // non-namespaced objects (PersistenceVolumes).
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+ defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

  controllerStopCh := make(chan struct{})
  binder.Run(controllerStopCh)
  defer close(controllerStopCh)

  objCount := getObjectCount()
- pvs := make([]*api.PersistentVolume, objCount)
- pvcs := make([]*api.PersistentVolumeClaim, objCount)
+ pvs := make([]*v1.PersistentVolume, objCount)
+ pvcs := make([]*v1.PersistentVolumeClaim, objCount)
  for i := 0; i < objCount; i++ {
  // This PV will be claimed, released, and deleted
  pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G",
- []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
- pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
+ pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
  }

  // Create PVs first
@@ -603,7 +603,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
  }()
  // Wait for them to get Available
  for i := 0; i < objCount; i++ {
- waitForAnyPersistentVolumePhase(watchPV, api.VolumeAvailable)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeAvailable)
  glog.V(1).Infof("%d volumes available", i+1)
  }
  glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available")
@@ -643,7 +643,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
  // Modify PVC
  i := rand.Intn(objCount)
  name := "pvc-" + strconv.Itoa(i)
- pvc, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Get(name)
+ pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name)
  if err != nil {
  // Silently ignore error, the PVC may have be already
  // deleted or not exists yet.
@@ -655,7 +655,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
  } else {
  pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
  }
- _, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Update(pvc)
+ _, err = testClient.PersistentVolumeClaims(v1.NamespaceDefault).Update(pvc)
  if err != nil {
  // Silently ignore error, the PVC may have been updated by
  // the controller.
@@ -684,12 +684,12 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {

  // wait until the binder pairs all claims
  for i := 0; i < objCount; i++ {
- waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
+ waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
  glog.V(1).Infof("%d claims bound", i+1)
  }
  // wait until the binder pairs all volumes
  for i := 0; i < objCount; i++ {
- waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, api.VolumeBound)
+ waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeBound)
  glog.V(1).Infof("%d claims bound", i+1)
  }

@@ -738,13 +738,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  defer watchPVC.Stop()

  // Create *bound* volumes and PVCs
- pvs := make([]*api.PersistentVolume, objCount)
- pvcs := make([]*api.PersistentVolumeClaim, objCount)
+ pvs := make([]*v1.PersistentVolume, objCount)
+ pvcs := make([]*v1.PersistentVolumeClaim, objCount)
  for i := 0; i < objCount; i++ {
  pvName := "pv-startup-" + strconv.Itoa(i)
  pvcName := "pvc-startup-" + strconv.Itoa(i)

- pvc := createPVC(pvcName, ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+ pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
  pvc.Annotations = map[string]string{"annBindCompleted": ""}
  pvc.Spec.VolumeName = pvName
  newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
@@ -752,7 +752,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
  }
  // Save Bound status as a separate transaction
- newPVC.Status.Phase = api.ClaimBound
+ newPVC.Status.Phase = v1.ClaimBound
  newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
  if err != nil {
  t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
@@ -761,11 +761,11 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  // Drain watchPVC with all events generated by the PVC until it's bound
  // We don't want to catch "PVC craated with Status.Phase == Pending"
  // later in this test.
- waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
+ waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)

  pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
- []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
- claimRef, err := api.GetReference(newPVC)
+ []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
+ claimRef, err := v1.GetReference(newPVC)
  if err != nil {
  glog.V(3).Infof("unexpected error getting claim reference: %v", err)
  return
@@ -776,7 +776,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
  }
  // Save Bound status as a separate transaction
- newPV.Status.Phase = api.VolumeBound
+ newPV.Status.Phase = v1.VolumeBound
  newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
  if err != nil {
  t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
@@ -785,7 +785,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  // Drain watchPV with all events generated by the PV until it's bound
  // We don't want to catch "PV craated with Status.Phase == Pending"
  // later in this test.
- waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+ waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
  }

  // Start the controller when all PVs and PVCs are already saved in etcd
@@ -801,20 +801,20 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
  for !finished {
  select {
  case volumeEvent := <-watchPV.ResultChan():
- volume, ok := volumeEvent.Object.(*api.PersistentVolume)
+ volume, ok := volumeEvent.Object.(*v1.PersistentVolume)
  if !ok {
  continue
  }
- if volume.Status.Phase != api.VolumeBound {
+ if volume.Status.Phase != v1.VolumeBound {
  t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
  }

  case claimEvent := <-watchPVC.ResultChan():
- claim, ok := claimEvent.Object.(*api.PersistentVolumeClaim)
+ claim, ok := claimEvent.Object.(*v1.PersistentVolumeClaim)
  if !ok {
  continue
  }
- if claim.Status.Phase != api.ClaimBound {
+ if claim.Status.Phase != v1.ClaimBound {
  t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
  }

@@ -862,14 +862,14 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
||||
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (PersistenceVolumes and StorageClasses).
|
||||
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
|
||||
defer testClient.Storage().StorageClasses().DeleteCollection(nil, api.ListOptions{})
|
||||
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
|
||||
defer testClient.Storage().StorageClasses().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
storageClass := storage.StorageClass{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "gold",
|
||||
},
|
||||
Provisioner: provisionerPluginName,
|
||||
@@ -881,9 +881,9 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
||||
defer close(stopCh)
|
||||
|
||||
objCount := getObjectCount()
|
||||
pvcs := make([]*api.PersistentVolumeClaim, objCount)
|
||||
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
|
||||
for i := 0; i < objCount; i++ {
|
||||
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
|
||||
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
|
||||
pvc.Annotations = map[string]string{
|
||||
storageutil.StorageClassAnnotation: "gold",
|
||||
}
|
||||
@@ -901,13 +901,13 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
||||
|
||||
// Wait until the controller provisions and binds all of them
|
||||
for i := 0; i < objCount; i++ {
|
||||
waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
|
||||
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
|
||||
glog.V(1).Infof("%d claims bound", i+1)
|
||||
}
|
||||
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
|
||||
|
||||
// check that we have enough bound PVs
|
||||
pvList, err := testClient.PersistentVolumes().List(api.ListOptions{})
|
||||
pvList, err := testClient.PersistentVolumes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list volumes: %s", err)
|
||||
}
|
||||
@@ -916,7 +916,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
||||
}
|
||||
for i := 0; i < objCount; i++ {
|
||||
pv := &pvList.Items[i]
|
||||
if pv.Status.Phase != api.VolumeBound {
|
||||
if pv.Status.Phase != v1.VolumeBound {
|
||||
t.Fatalf("Expected volume %s to be bound, is %s instead", pv.Name, pv.Status.Phase)
|
||||
}
|
||||
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
|
||||
@@ -930,7 +930,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
||||
// Wait for the PVs to get deleted by listing remaining volumes
|
||||
// (delete events were unreliable)
|
||||
for {
|
||||
volumes, err := testClient.PersistentVolumes().List(api.ListOptions{})
|
||||
volumes, err := testClient.PersistentVolumes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list volumes: %v", err)
|
||||
}
|
||||
@@ -959,7 +959,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {

// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

stopCh := make(chan struct{})
|
||||
controller.Run(stopCh)
|
||||
@@ -967,11 +967,11 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
||||
|
||||
// This PV will be claimed, released, and deleted
|
||||
pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
|
||||
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
|
||||
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
|
||||
pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
|
||||
[]api.PersistentVolumeAccessMode{api.ReadWriteMany}, api.PersistentVolumeReclaimRetain)
|
||||
[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, v1.PersistentVolumeReclaimRetain)
|
||||
|
||||
pvc := createPVC("pvc-rwm", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteMany})
|
||||
pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany})
|
||||
|
||||
_, err := testClient.PersistentVolumes().Create(pv_rwm)
|
||||
if err != nil {
|
||||
@@ -990,9 +990,9 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
||||
t.Log("claim created")
|
||||
|
||||
// wait until the controller pairs the volume and claim
|
||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
|
||||
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
|
||||
t.Log("volume bound")
|
||||
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
|
||||
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
|
||||
t.Log("claim bound")
|
||||
|
||||
// only RWM PV is bound
|
||||
@@ -1020,11 +1020,11 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
||||
}
|
||||
t.Log("claim deleted")
|
||||
|
||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeReleased)
|
||||
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
|
||||
t.Log("volume released")
|
||||
}
|
||||
|
||||
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase api.PersistentVolumePhase) {
|
||||
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
|
||||
// Check if the volume is already in requested phase
|
||||
volume, err := client.Core().PersistentVolumes().Get(pvName)
|
||||
if err == nil && volume.Status.Phase == phase {
|
||||
@@ -1034,7 +1034,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
|
||||
// Wait for the phase
|
||||
for {
|
||||
event := <-w.ResultChan()
|
||||
volume, ok := event.Object.(*api.PersistentVolume)
|
||||
volume, ok := event.Object.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@@ -1045,7 +1045,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
|
||||
}
|
||||
}
|
||||
|
||||
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase api.PersistentVolumeClaimPhase) {
|
||||
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
|
||||
// Check if the claim is already in requested phase
|
||||
claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
|
||||
if err == nil && claim.Status.Phase == phase {
|
||||
@@ -1055,7 +1055,7 @@ func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, n
|
||||
// Wait for the phase
|
||||
for {
|
||||
event := <-w.ResultChan()
|
||||
claim, ok := event.Object.(*api.PersistentVolumeClaim)
|
||||
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@@ -1066,10 +1066,10 @@ func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, n
|
||||
}
|
||||
}
|
||||
|
||||
func waitForAnyPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumePhase) {
|
||||
func waitForAnyPersistentVolumePhase(w watch.Interface, phase v1.PersistentVolumePhase) {
|
||||
for {
|
||||
event := <-w.ResultChan()
|
||||
volume, ok := event.Object.(*api.PersistentVolume)
|
||||
volume, ok := event.Object.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@@ -1080,10 +1080,10 @@ func waitForAnyPersistentVolumePhase(w watch.Interface, phase api.PersistentVolu
|
||||
}
|
||||
}
|
||||
|
||||
func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase api.PersistentVolumeClaimPhase) {
|
||||
func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
|
||||
for {
|
||||
event := <-w.ResultChan()
|
||||
claim, ok := event.Object.(*api.PersistentVolumeClaim)
|
||||
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@@ -1094,18 +1094,18 @@ func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase api.Persisten
|
||||
}
|
||||
}
|
||||
|
||||
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
|
||||
func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
|
||||
// Use higher QPS and Burst, there is a test for race conditions which
|
||||
// creates many objects and default values were too low.
|
||||
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
|
||||
Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
|
||||
QPS: 1000000,
|
||||
Burst: 1000000,
|
||||
})
|
||||
testClient := clientset.NewForConfigOrDie(&restclient.Config{
|
||||
Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
|
||||
QPS: 1000000,
|
||||
Burst: 1000000,
|
||||
})
|
||||
@@ -1134,11 +1134,11 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
|
||||
EnableDynamicProvisioning: true,
|
||||
})
|
||||
|
||||
watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
|
||||
watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
|
||||
}
|
||||
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
}
|
||||
@@ -1146,26 +1146,26 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
|
||||
return testClient, ctrl, watchPV, watchPVC
|
||||
}
|
||||
|
||||
func createPV(name, path, cap string, mode []api.PersistentVolumeAccessMode, reclaim api.PersistentVolumeReclaimPolicy) *api.PersistentVolume {
|
||||
return &api.PersistentVolume{
|
||||
ObjectMeta: api.ObjectMeta{Name: name},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: path}},
|
||||
Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(cap)},
|
||||
func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: path}},
|
||||
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)},
|
||||
AccessModes: mode,
|
||||
PersistentVolumeReclaimPolicy: reclaim,
|
||||
},
|
||||
}
|
||||
}

func createPVC(name, namespace, cap string, mode []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
return &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
func createPVC(name, namespace, cap string, mode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name:      name,
Namespace: namespace,
},
Spec: api.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(cap)}},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)}},
AccessModes: mode,
},
}

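// A hypothetical usage sketch (not part of this diff) of the two constructors
// above, assuming the release_1_5 test client returned by createClients and the
// surrounding file's imports (testing, v1, clientset); the function name and
// wiring are illustrative only. Both helpers now build versioned v1 objects, so
// the v1 access-mode and reclaim-policy constants are used.
func demoCreatePVAndPVC(t *testing.T, testClient *clientset.Clientset, namespace string) {
	pv := createPV("pv-demo", "/tmp/demo", "10G",
		[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
	pvc := createPVC("pvc-demo", namespace, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

	if _, err := testClient.Core().PersistentVolumes().Create(pv); err != nil {
		t.Fatalf("Failed to create PV: %v", err)
	}
	if _, err := testClient.Core().PersistentVolumeClaims(namespace).Create(pvc); err != nil {
		t.Fatalf("Failed to create PVC: %v", err)
	}
}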
@@ -22,9 +22,9 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
@@ -37,7 +37,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
|
||||
ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
var (
|
||||
iZero = int64(0)
|
||||
@@ -46,13 +46,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
|
||||
iNeg = int64(-1)
|
||||
)
|
||||
|
||||
prototypePod := func() *api.Pod {
|
||||
return &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
prototypePod := func() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "xxx",
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
@@ -155,18 +155,18 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
|
||||
ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "xxx",
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
SecurityContext: &api.SecurityContext{
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &isReadOnly,
|
||||
},
|
||||
},
|
||||
|
@@ -28,8 +28,10 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
@@ -63,8 +65,9 @@ func TestQuota(t *testing.T) {
defer s.Close()

admissionCh := make(chan struct{})
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
admission, err := resourcequota.NewResourceQuota(clientset, quotainstall.NewRegistry(clientset, nil), 5, admissionCh)
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
admission, err := resourcequota.NewResourceQuota(internalClientset, quotainstall.NewRegistry(nil, nil), 5, admissionCh)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
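// A hypothetical helper (not in this commit) capturing the pattern above: the
// versioned release_1_5 clientset drives the test's own API calls, while a
// separate internalclientset is handed to server-side components such as the
// quota admission controller, which still consume internal API types. A sketch,
// assuming the quota test file's existing imports:
func newTestClients(host string) (*clientset.Clientset, *internalclientset.Clientset) {
	config := restclient.Config{
		QPS:           -1,
		Host:          host,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
	}
	return clientset.NewForConfigOrDie(&config), internalclientset.NewForConfigOrDie(&config)
}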
@@ -104,14 +107,14 @@ func TestQuota(t *testing.T) {
|
||||
endTime := time.Now()
|
||||
t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
|
||||
|
||||
quota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
quota := &v1.ResourceQuota{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "quota",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("1000"),
|
||||
Spec: v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourcePods: resource.MustParse("1000"),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -123,8 +126,8 @@ func TestQuota(t *testing.T) {
|
||||
t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
|
||||
}
|
||||
|
||||
func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.Clientset) {
|
||||
w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(api.SingleObject(api.ObjectMeta{Name: quota.Name}))
|
||||
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
|
||||
w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -141,7 +144,7 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
|
||||
}
|
||||
|
||||
switch cast := event.Object.(type) {
|
||||
case *api.ResourceQuota:
|
||||
case *v1.ResourceQuota:
|
||||
if len(cast.Status.Hard) > 0 {
|
||||
return true, nil
|
||||
}
@@ -155,23 +158,23 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
}

func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
target := 100
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
target := int32(100)
rc := &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
Name:      "foo",
Namespace: namespace,
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(target),
Spec: v1.ReplicationControllerSpec{
Replicas: &target,
Selector: map[string]string{"foo": "bar"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name:  "container",
Image: "busybox",
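// The versioned ReplicationControllerSpec declares Replicas as *int32 (so an
// unset value can be told apart from zero), which is why target is now an
// int32 whose address is taken. A tiny self-contained sketch of the usual
// helper for this idiom (plain Go, no Kubernetes dependencies assumed):
//
//	package main
//
//	import "fmt"
//
//	// int32Ptr returns a pointer to its argument; a literal such as 100
//	// cannot have its address taken directly.
//	func int32Ptr(i int32) *int32 { return &i }
//
//	func main() {
//		replicas := int32Ptr(100) // equivalent to: target := int32(100); &target
//		fmt.Println(*replicas)    // prints 100
//	}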
@@ -182,7 +185,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
},
}

w, err := clientset.Core().ReplicationControllers(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: rc.Name}))
w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -199,9 +202,9 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
}

switch cast := event.Object.(type) {
case *api.ReplicationController:
case *v1.ReplicationController:
fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
if int(cast.Status.Replicas) == target {
if cast.Status.Replicas == target {
return true, nil
}
}
@@ -209,7 +212,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
return false, nil
})
if err != nil {
pods, _ := clientset.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()})
pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
}
}

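// The switch to v1.ListOptions above also changes the selector fields from
// structured selectors to plain strings, hence the added String() calls. A
// small self-contained sketch of that conversion, assuming this repository's
// labels and fields packages are importable (as in the test above):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// Structured selectors are built as before and then rendered to the
	// string form that v1.ListOptions expects.
	ls := labels.SelectorFromSet(labels.Set{"foo": "bar"}).String()
	fs := fields.Everything().String()
	fmt.Printf("LabelSelector: %q, FieldSelector: %q\n", ls, fs)
}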
@@ -30,7 +30,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
@@ -137,12 +136,12 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informers := informers.NewSharedInformerFactory(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod)
|
||||
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
|
||||
|
||||
rm := replicaset.NewReplicaSetController(
|
||||
informers.ReplicaSets(),
|
||||
informers.Pods(),
|
||||
internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
|
||||
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
|
||||
replicaset.BurstReplicas,
|
||||
4096,
|
||||
enableGarbageCollector,
|
||||
|
@@ -29,7 +29,6 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
@@ -137,10 +136,10 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
|
||||
resyncPeriodFunc := func() time.Duration {
|
||||
return resyncPeriod
|
||||
}
|
||||
podInformer := informers.NewPodInformer(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
|
||||
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
|
||||
rm := replication.NewReplicationManager(
|
||||
podInformer,
|
||||
internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
|
||||
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
|
||||
resyncPeriodFunc,
|
||||
replication.BurstReplicas,
|
||||
4096,
|
||||
|
@@ -29,12 +29,12 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
@@ -51,8 +51,8 @@ const (
|
||||
prioritize = "prioritize"
|
||||
)
|
||||
|
||||
type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error)
|
||||
type priorityFunc func(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error)
|
||||
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
|
||||
type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
|
||||
|
||||
type priorityConfig struct {
|
||||
function priorityFunc
|
||||
@@ -104,15 +104,15 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, schedulerapi.FailedNodesMap, error) {
|
||||
filtered := []api.Node{}
|
||||
func (e *Extender) Filter(pod *v1.Pod, nodes *v1.NodeList) (*v1.NodeList, schedulerapi.FailedNodesMap, error) {
|
||||
filtered := []v1.Node{}
|
||||
failedNodesMap := schedulerapi.FailedNodesMap{}
|
||||
for _, node := range nodes.Items {
|
||||
fits := true
|
||||
for _, predicate := range e.predicates {
|
||||
fit, err := predicate(pod, &node)
|
||||
if err != nil {
|
||||
return &api.NodeList{}, schedulerapi.FailedNodesMap{}, err
|
||||
return &v1.NodeList{}, schedulerapi.FailedNodesMap{}, err
|
||||
}
|
||||
if !fit {
|
||||
fits = false
|
||||
@@ -125,10 +125,10 @@ func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, sch
|
||||
failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
|
||||
}
|
||||
}
|
||||
return &api.NodeList{Items: filtered}, failedNodesMap, nil
|
||||
return &v1.NodeList{Items: filtered}, failedNodesMap, nil
|
||||
}
|
||||
|
||||
func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
func (e *Extender) Prioritize(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
result := schedulerapi.HostPriorityList{}
|
||||
combinedScores := map[string]int{}
|
||||
for _, prioritizer := range e.prioritizers {
|
||||
@@ -151,21 +151,21 @@ func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func machine_1_2_3_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
|
||||
func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func machine_2_3_5_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
|
||||
func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
result := schedulerapi.HostPriorityList{}
|
||||
for _, node := range nodes.Items {
|
||||
score := 1
|
||||
@@ -177,7 +177,7 @@ func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.Hos
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func machine_3_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
result := schedulerapi.HostPriorityList{}
|
||||
for _, node := range nodes.Items {
|
||||
score := 1
|
||||
@@ -196,7 +196,7 @@ func TestSchedulerExtender(t *testing.T) {
|
||||
ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
extender1 := &Extender{
|
||||
name: "extender1",
|
||||
@@ -236,16 +236,16 @@ func TestSchedulerExtender(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
policy.APIVersion = registered.GroupOrDie(api.GroupName).GroupVersion.String()
|
||||
policy.APIVersion = registered.GroupOrDie(v1.GroupName).GroupVersion.String()
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
@@ -253,24 +253,24 @@ func TestSchedulerExtender(t *testing.T) {
|
||||
DoTestPodScheduling(ns, t, clientSet)
|
||||
}
|
||||
|
||||
func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface) {
|
||||
func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
goodCondition := api.NodeCondition{
|
||||
Type: api.NodeReady,
|
||||
Status: api.ConditionTrue,
|
||||
goodCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: fmt.Sprintf("schedulable condition"),
|
||||
LastHeartbeatTime: unversioned.Time{time.Now()},
|
||||
}
|
||||
node := &api.Node{
|
||||
Spec: api.NodeSpec{Unschedulable: false},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
node := &v1.Node{
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []api.NodeCondition{goodCondition},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -281,10 +281,10 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface
|
||||
}
|
||||
}
|
||||
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
},
|
||||
}
|
||||
|
||||
|
@@ -25,14 +25,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
@@ -43,7 +43,7 @@ import (
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c clientset.Interface)
|
||||
type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeStore cache.Store, c clientset.Interface)
|
||||
|
||||
type nodeStateManager struct {
|
||||
makeSchedulable nodeMutationFunc
|
||||
@@ -57,16 +57,16 @@ func TestUnschedulableNodes(t *testing.T) {
|
||||
ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
@@ -94,7 +94,7 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
|
||||
// Wait till the passFunc confirms that the object it expects to see is in the store.
|
||||
// Used to observe reflected events.
|
||||
func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n interface{}) bool) error {
|
||||
nodes := []*api.Node{}
|
||||
nodes := []*v1.Node{}
|
||||
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
|
||||
return true, nil
|
||||
@@ -105,7 +105,7 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
|
||||
if n == nil {
|
||||
nodes = append(nodes, nil)
|
||||
} else {
|
||||
nodes = append(nodes, n.(*api.Node))
|
||||
nodes = append(nodes, n.(*v1.Node))
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
@@ -120,33 +120,33 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
|
||||
return err
|
||||
}
|
||||
|
||||
func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Namespace, nodeStore cache.Store) {
|
||||
func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
goodCondition := api.NodeCondition{
|
||||
Type: api.NodeReady,
|
||||
Status: api.ConditionTrue,
|
||||
goodCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: fmt.Sprintf("schedulable condition"),
|
||||
LastHeartbeatTime: unversioned.Time{time.Now()},
|
||||
}
|
||||
badCondition := api.NodeCondition{
|
||||
Type: api.NodeReady,
|
||||
Status: api.ConditionUnknown,
|
||||
badCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionUnknown,
|
||||
Reason: fmt.Sprintf("unschedulable condition"),
|
||||
LastHeartbeatTime: unversioned.Time{time.Now()},
|
||||
}
|
||||
// Create a new schedulable node, since we're first going to apply
|
||||
// the unschedulable condition and verify that pods aren't scheduled.
|
||||
node := &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-node"},
|
||||
Spec: api.NodeSpec{Unschedulable: false},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []api.NodeCondition{goodCondition},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
},
|
||||
}
|
||||
nodeKey, err := cache.MetaNamespaceKeyFunc(node)
|
||||
@@ -166,7 +166,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
|
||||
nodeModifications := []nodeStateManager{
|
||||
// Test node.Spec.Unschedulable=true/false
|
||||
{
|
||||
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
|
||||
makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Spec.Unschedulable = true
|
||||
if _, err := c.Core().Nodes().Update(n); err != nil {
|
||||
t.Fatalf("Failed to update node with unschedulable=true: %v", err)
|
||||
@@ -176,19 +176,19 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
|
||||
// Nodes that are unschedulable or that are not ready or
// have their disk full (Node.Spec.Conditions) are excluded
// based on NodeConditionPredicate, a separate check
return node != nil && node.(*api.Node).Spec.Unschedulable == true
return node != nil && node.(*v1.Node).Spec.Unschedulable == true
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
|
||||
}
|
||||
},
|
||||
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
|
||||
makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Spec.Unschedulable = false
|
||||
if _, err := c.Core().Nodes().Update(n); err != nil {
|
||||
t.Fatalf("Failed to update node with unschedulable=false: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*api.Node).Spec.Unschedulable == false
|
||||
return node != nil && node.(*v1.Node).Spec.Unschedulable == false
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
|
||||
@@ -197,35 +197,35 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
|
||||
},
|
||||
// Test node.Status.Conditions=ConditionTrue/Unknown
|
||||
{
|
||||
makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []api.NodeCondition{badCondition},
|
||||
Conditions: []v1.NodeCondition{badCondition},
|
||||
}
|
||||
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
|
||||
t.Fatalf("Failed to update node with bad status condition: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionUnknown
|
||||
return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionUnknown
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
|
||||
}
|
||||
},
|
||||
makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []api.NodeCondition{goodCondition},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
}
|
||||
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
|
||||
t.Fatalf("Failed to update node with healthy status condition: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionTrue
|
||||
return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionTrue
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
|
||||
@@ -245,10 +245,10 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
|
||||
|
||||
// Create the new pod, note that this needs to happen post unschedulable
|
||||
// modification or we have a race in the test.
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
},
|
||||
}
|
||||
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
|
||||
@@ -282,7 +282,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
|
||||
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
|
||||
}
|
||||
|
||||
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0))
|
||||
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
@@ -322,30 +322,30 @@ func TestMultiScheduler(t *testing.T) {
- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
*/
// 1. create and start default-scheduler
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
// default-scheduler will be stopped later
|
||||
|
||||
// 2. create a node
|
||||
node := &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node-multi-scheduler-test-node"},
|
||||
Spec: api.NodeSpec{Unschedulable: false},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -397,16 +397,16 @@ func TestMultiScheduler(t *testing.T) {
|
||||
}
|
||||
|
||||
// 5. create and start a scheduler with name "foo-scheduler"
|
||||
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig2, err := schedulerConfigFactory2.Create()
|
||||
if err != nil {
|
||||
t.Errorf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster2 := record.NewBroadcaster()
|
||||
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"})
|
||||
eventBroadcaster2.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
|
||||
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(v1.EventSource{Component: "foo-scheduler"})
|
||||
eventBroadcaster2.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig2).Run()
|
||||
|
||||
defer close(schedulerConfig2.StopEverything)
|
||||
@@ -421,11 +421,11 @@ func TestMultiScheduler(t *testing.T) {
|
||||
}
|
||||
|
||||
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
@@ -469,11 +469,11 @@ func TestMultiScheduler(t *testing.T) {
|
||||
*/
|
||||
}
|
||||
|
||||
func createPod(client clientset.Interface, name string, annotation map[string]string) *api.Pod {
|
||||
return &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
|
||||
func createPod(client clientset.Interface, name string, annotation map[string]string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: name, Annotations: annotation},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -487,33 +487,33 @@ func TestAllocatable(t *testing.T) {
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
// 1. create and start default-scheduler
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
// default-scheduler will be stopped later
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
|
||||
// 2. create a node without allocatable awareness
|
||||
node := &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
|
||||
Spec: api.NodeSpec{Unschedulable: false},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
api.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -524,17 +524,17 @@ func TestAllocatable(t *testing.T) {
|
||||
}
|
||||
|
||||
// 3. create resource pod which requires less than Capacity
|
||||
podResource := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: "pod-test-allocatable"},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
podResource := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "pod-test-allocatable"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container",
|
||||
Image: e2e.GetPauseImageName(clientSet),
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
|
||||
api.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -556,16 +556,16 @@ func TestAllocatable(t *testing.T) {
|
||||
}
|
||||
|
||||
// 5. Change the node status to allocatable aware, note that Allocatable is less than Pod's requirement
|
||||
allocNode.Status = api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
api.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
allocNode.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
},
|
||||
Allocatable: api.ResourceList{
|
||||
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
|
||||
api.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -573,7 +573,7 @@ func TestAllocatable(t *testing.T) {
|
||||
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
|
||||
}
|
||||
|
||||
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
|
||||
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &v1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("Failed to remove first resource pod: %v", err)
|
||||
}
|
||||
|
||||
|
@@ -22,7 +22,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
@@ -96,10 +96,10 @@ func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
|
||||
podCreatorConfig := testutils.NewTestPodCreatorConfig()
|
||||
for i := 0; i < numGroups; i++ {
|
||||
podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
|
||||
testutils.NewCustomCreatePodStrategy(&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
testutils.NewCustomCreatePodStrategy(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
GenerateName: "sched-perf-node-affinity-pod-",
|
||||
Annotations: map[string]string{api.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
|
||||
Annotations: map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
|
||||
},
|
||||
Spec: testutils.MakePodSpec(),
|
||||
}),
|
||||
|
@@ -21,10 +21,10 @@ import (
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
@@ -52,20 +52,20 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
|
||||
Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
|
||||
QPS: 5000.0,
|
||||
Burst: 5000,
|
||||
})
|
||||
|
||||
schedulerConfigFactory = factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
|
||||
schedulerConfigFactory = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
panic("Couldn't create scheduler config")
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
destroyFunc = func() {
|
||||
|
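Side note (not part of the diff): the switch from unversionedcore.EventSinkImpl to v1core.EventSinkImpl above means scheduler events now flow through the versioned release_1_5 core client. A compile-oriented sketch of that wiring, with the host URL left as a hypothetical parameter and the package name invented for the example:

package benchutil // illustrative package name, not from the commit

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newSchedulerRecorder builds an event recorder backed by the versioned client,
// mirroring the mustSetupScheduler changes above. host is a placeholder.
func newSchedulerRecorder(host string) record.EventRecorder {
	c := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          host,
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
	})
	broadcaster := record.NewBroadcaster()
	// Events are written through the release_1_5 core client rather than the
	// internal-version sink that was used before this commit.
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.Core().Events("")})
	return broadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
}
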
@@ -23,9 +23,9 @@ package secrets
import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
@@ -42,7 +42,7 @@ func TestSecrets(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

ns := framework.CreateTestingNamespace("secret", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -51,10 +51,10 @@ func TestSecrets(t *testing.T) {
}

// DoTestSecrets test secrets for one api version.
func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace) {
func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
// Make a secret object.
s := api.Secret{
ObjectMeta: api.ObjectMeta{
s := v1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: "secret",
Namespace: ns.Name,
},
@@ -69,27 +69,27 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace)
defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)

// Template for pods that use a secret.
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "XXX",
Namespace: ns.Name,
},
Spec: api.PodSpec{
Volumes: []api.Volume{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "secvol",
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "secret",
},
},
},
},
Containers: []api.Container{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
VolumeMounts: []api.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "secvol",
MountPath: "/fake/path",

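Side note (not part of the diff): once DoTestSecrets builds the versioned objects above, pushing them to the test master is just the usual clientset calls. A hedged sketch under assumed names (the helper and its error handling are not from the commit):

package secretsexample // illustrative, not the test package itself

import (
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// createSecretAndPod is a hypothetical helper: it creates a v1.Secret and a pod
// that mounts it through the versioned client, failing the test on error.
func createSecretAndPod(t *testing.T, client clientset.Interface, ns string, secret *v1.Secret, pod *v1.Pod) {
	if _, err := client.Core().Secrets(ns).Create(secret); err != nil {
		t.Fatalf("unable to create secret: %v", err)
	}
	if _, err := client.Core().Pods(ns).Create(pod); err != nil {
		t.Fatalf("unable to create pod: %v", err)
	}
}
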
@@ -31,14 +31,15 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/auth/authenticator"
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/auth/user"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -71,7 +72,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
ns := "test-service-account-creation"

// Create namespace
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
if err != nil {
t.Fatalf("could not create namespace: %v", err)
}
@@ -106,13 +107,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
name := "my-service-account"

// Create namespace
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
if err != nil {
t.Fatalf("could not create namespace: %v", err)
}

// Create service account
serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: name}})
serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: name}})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@@ -146,7 +147,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
serviceAccount.Secrets = []api.ObjectReference{}
serviceAccount.Secrets = []v1.ObjectReference{}
_, err = c.Core().ServiceAccounts(ns).Update(serviceAccount)
if err != nil {
t.Fatal(err)
@@ -174,7 +175,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
// Get all secrets in the namespace
secrets, err := c.Core().Secrets(ns).List(api.ListOptions{})
secrets, err := c.Core().Secrets(ns).List(v1.ListOptions{})
// Retrieval errors should fail
if err != nil {
return false, err
@@ -200,7 +201,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
ns := "auto-mount-ns"

// Create "my" namespace
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
if err != nil && !errors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}
@@ -212,10 +213,10 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
}

// Pod to create
protoPod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "protopod"},
Spec: api.PodSpec{
Containers: []api.Container{
protoPod := v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "protopod"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container-1",
Image: "container-1-image",
@@ -223,15 +224,15 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
{
Name: "container-2",
Image: "container-2-image",
VolumeMounts: []api.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{Name: "empty-dir", MountPath: serviceaccountadmission.DefaultAPITokenMountPath},
},
},
},
Volumes: []api.Volume{
Volumes: []v1.Volume{
{
Name: "empty-dir",
VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}},
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
},
},
},
@@ -240,16 +241,16 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
// Pod we expect to get created
defaultMode := int32(0644)
expectedServiceAccount := serviceaccountadmission.DefaultServiceAccountName
expectedVolumes := append(protoPod.Spec.Volumes, api.Volume{
expectedVolumes := append(protoPod.Spec.Volumes, v1.Volume{
Name: defaultTokenName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: defaultTokenName,
DefaultMode: &defaultMode,
},
},
})
expectedContainer1VolumeMounts := []api.VolumeMount{
expectedContainer1VolumeMounts := []v1.VolumeMount{
{Name: defaultTokenName, MountPath: serviceaccountadmission.DefaultAPITokenMountPath, ReadOnly: true},
}
expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts
@@ -261,13 +262,13 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
if createdPod.Spec.ServiceAccountName != expectedServiceAccount {
t.Fatalf("Expected %s, got %s", expectedServiceAccount, createdPod.Spec.ServiceAccountName)
}
if !api.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
if !v1.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedVolumes, createdPod.Spec.Volumes)
}
if !api.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
if !v1.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer1VolumeMounts, createdPod.Spec.Containers[0].VolumeMounts)
}
if !api.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
if !v1.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer2VolumeMounts, createdPod.Spec.Containers[1].VolumeMounts)
}
}
@@ -280,19 +281,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
otherns := "other-ns"

// Create "my" namespace
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: myns}})
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: myns}})
if err != nil && !errors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}

// Create "other" namespace
_, err = c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: otherns}})
_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: otherns}})
if err != nil && !errors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}

// Create "ro" user in myns
_, err = c.Core().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readOnlyServiceAccountName}})
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readOnlyServiceAccountName}})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@@ -312,7 +313,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)

// Create "rw" user in myns
_, err = c.Core().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readWriteServiceAccountName}})
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readWriteServiceAccountName}})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@@ -348,10 +349,11 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
}))

// Anonymous client config
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}
// Root client
// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}, BearerToken: rootToken})
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
// Set up two authenticators:
// 1. A token authenticator that maps the rootToken to the "root" user
// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
@@ -405,7 +407,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
})

// Set up admission plugin to auto-assign serviceaccounts to pods
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClientset)
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(internalRootClientset)

masterConfig := framework.NewMasterConfig()
masterConfig.GenericConfig.EnableIndex = true
@@ -419,7 +421,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
go tokenController.Run(1, stopCh)

informers := informers.NewSharedInformerFactory(rootClientset, controller.NoResyncPeriodFunc())
informers := informers.NewSharedInformerFactory(rootClientset, nil, controller.NoResyncPeriodFunc())
serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
informers.Start(stopCh)
go serviceAccountController.Run(5, stopCh)
@@ -435,12 +437,12 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
return rootClientset, clientConfig, stop
}

func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*api.ServiceAccount, error) {
func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
if !shouldWait {
return c.Core().ServiceAccounts(ns).Get(name)
}

var user *api.ServiceAccount
var user *v1.ServiceAccount
var err error
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
user, err = c.Core().ServiceAccounts(ns).Get(name)
@@ -476,12 +478,12 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
if err != nil {
return false, err
}
if secret.Type != api.SecretTypeServiceAccountToken {
if secret.Type != v1.SecretTypeServiceAccountToken {
continue
}
name := secret.Annotations[api.ServiceAccountNameKey]
uid := secret.Annotations[api.ServiceAccountUIDKey]
tokenData := secret.Data[api.ServiceAccountTokenKey]
name := secret.Annotations[v1.ServiceAccountNameKey]
uid := secret.Annotations[v1.ServiceAccountUIDKey]
tokenData := secret.Data[v1.ServiceAccountTokenKey]
if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
tokenName = secret.Name
token = string(tokenData)
@@ -512,18 +514,18 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
type testOperation func() error

func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
testSecret := &api.Secret{
ObjectMeta: api.ObjectMeta{Name: "testSecret"},
testSecret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{Name: "testSecret"},
Data: map[string][]byte{"test": []byte("data")},
}

readOps := []testOperation{
func() error {
_, err := c.Core().Secrets(ns).List(api.ListOptions{})
_, err := c.Core().Secrets(ns).List(v1.ListOptions{})
return err
},
func() error {
_, err := c.Core().Pods(ns).List(api.ListOptions{})
_, err := c.Core().Pods(ns).List(v1.ListOptions{})
return err
},
}

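Side note (not part of the diff): the getServiceAccount helper above polls with wait.Poll until the controller has created the account. A self-contained sketch of that pattern against the versioned types; the package and function names here are illustrative, and swallowing Get errors while polling is a simplification of the test's own logic.

package serviceaccountexample // illustrative, not from the commit

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForServiceAccount polls until the named v1.ServiceAccount exists, mirroring
// the wait.Poll loop in getServiceAccount above.
func waitForServiceAccount(c *clientset.Clientset, ns, name string) (*v1.ServiceAccount, error) {
	var sa *v1.ServiceAccount
	err := wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
		var getErr error
		sa, getErr = c.Core().ServiceAccounts(ns).Get(name)
		if getErr != nil {
			// Keep polling; the service accounts controller may not have run yet.
			return false, nil
		}
		return true, nil
	})
	return sa, err
}
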
@@ -23,13 +23,13 @@ package storageclasses
import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -41,7 +41,7 @@ func TestStorageClasses(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

ns := framework.CreateTestingNamespace("storageclass", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
@@ -50,13 +50,13 @@ func TestStorageClasses(t *testing.T) {
}

// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Namespace) {
func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
// Make a storage class object.
s := storage.StorageClass{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "gold",
},
Provisioner: provisionerPluginName,
@@ -68,17 +68,17 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Name
defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)

// Template for pvcs that specify a storage class
pvc := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "XXX",
Namespace: ns.Name,
Annotations: map[string]string{
storageutil.StorageClassAnnotation: "gold",
},
},
Spec: api.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
},
}

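Side note (not part of the diff): the PVC template above requests one gigabyte through the versioned resource types. Shown in isolation below; resource.MustParse panics on malformed quantities, which is acceptable in test code, and the wrapper function name is illustrative.

package storageclassexample // illustrative, not the test package itself

import (
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

// newGoldClaimSpec returns the request block used by the PVC template: 1G of
// storage with ReadWriteOnce access, expressed with the v1 types.
func newGoldClaimSpec() v1.PersistentVolumeClaimSpec {
	return v1.PersistentVolumeClaimSpec{
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G"),
			},
		},
		AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
	}
}
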
27
test/integration/thirdparty/thirdparty_test.go
vendored
@@ -29,8 +29,9 @@ import (
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/wait"
@@ -43,7 +44,7 @@ func TestThirdPartyDelete(t *testing.T) {
defer s.Close()

clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
client := internalclientset.NewForConfigOrDie(clientConfig)
client := clientset.NewForConfigOrDie(clientConfig)

DoTestInstallThirdPartyAPIDelete(t, client, clientConfig)
}
@@ -53,7 +54,7 @@ func TestThirdPartyMultiple(t *testing.T) {
defer s.Close()

clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
client := internalclientset.NewForConfigOrDie(clientConfig)
client := clientset.NewForConfigOrDie(clientConfig)

DoTestInstallMultipleAPIs(t, client, clientConfig)
}
@@ -63,7 +64,7 @@ var versionsToTest = []string{"v1"}

type Foo struct {
unversioned.TypeMeta `json:",inline"`
api.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
v1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`

SomeField string `json:"someField"`
OtherField int `json:"otherField"`
@@ -77,7 +78,7 @@ type FooList struct {
}

// installThirdParty installs a third party resoure and returns a defer func
func installThirdParty(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
var err error
_, err = client.Extensions().ThirdPartyResources().Create(tpr)
if err != nil {
@@ -123,13 +124,13 @@ func installThirdParty(t *testing.T, client internalclientset.Interface, clientC
}
}

func DoTestInstallMultipleAPIs(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config) {
func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
group := "company.com"
version := "v1"

defer installThirdParty(t, client, clientConfig,
&extensions.ThirdPartyResource{
ObjectMeta: api.ObjectMeta{Name: "foo.company.com"},
ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
Versions: []extensions.APIVersion{{Name: version}},
}, group, version, "foos",
)()
@@ -137,24 +138,24 @@ func DoTestInstallMultipleAPIs(t *testing.T, client internalclientset.Interface,
// TODO make multiple resources in one version work
// defer installThirdParty(t, client, clientConfig,
// &extensions.ThirdPartyResource{
// ObjectMeta: api.ObjectMeta{Name: "bar.company.com"},
// ObjectMeta: v1.ObjectMeta{Name: "bar.company.com"},
// Versions: []extensions.APIVersion{{Name: version}},
// }, group, version, "bars",
// )()
}

func DoTestInstallThirdPartyAPIDelete(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config) {
func DoTestInstallThirdPartyAPIDelete(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
for _, version := range versionsToTest {
testInstallThirdPartyAPIDeleteVersion(t, client, clientConfig, version)
}
}

func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config, version string) {
func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, version string) {
group := "company.com"

defer installThirdParty(t, client, clientConfig,
&extensions.ThirdPartyResource{
ObjectMeta: api.ObjectMeta{Name: "foo.company.com"},
ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
Versions: []extensions.APIVersion{{Name: version}},
}, group, version, "foos",
)()
@@ -168,7 +169,7 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client internalclientse
}

expectedObj := Foo{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "test",
Namespace: "default",
},

@@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"