test/integration
@@ -32,7 +32,7 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/runtime"
@@ -47,7 +47,7 @@ func TestClient(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 ns := framework.CreateTestingNamespace("client", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
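The hunk above shows the pattern this commit applies throughout test/integration: the internal clientset gives way to the generated release_1_5 clientset, and the content config pins the external v1 group. A minimal consolidated sketch of the new construction, assuming only the 1.5-era packages already imported above (the function name newTestClient is illustrative, not from the commit):

// newTestClient mirrors the post-migration construction used by these
// tests: a versioned (release_1_5) clientset pinned to the core v1 group.
package integration

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func newTestClient(host string) *clientset.Clientset {
	// v1.GroupName and api.GroupName both name the core ("") group, so
	// GroupOrDie resolves the same GroupMeta; what changes is that the
	// surrounding test code now serializes the external v1 types.
	return clientset.NewForConfigOrDie(&restclient.Config{
		Host: host,
		ContentConfig: restclient.ContentConfig{
			GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion,
		},
	})
}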
@@ -60,7 +60,7 @@ func TestClient(t *testing.T) {
 t.Errorf("expected %#v, got %#v", e, a)
 }

-pods, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
+pods, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
 if err != nil {
 t.Fatalf("unexpected error: %v", err)
 }
@@ -69,13 +69,13 @@ func TestClient(t *testing.T) {
 }

 // get a validation error
-pod := &api.Pod{
+pod := &v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 GenerateName: "test",
 Namespace: ns.Name,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {
 Name: "test",
 },
@@ -99,7 +99,7 @@ func TestClient(t *testing.T) {
 }

 // pod is shown, but not scheduled
-pods, err = client.Core().Pods(ns.Name).List(api.ListOptions{})
+pods, err = client.Core().Pods(ns.Name).List(v1.ListOptions{})
 if err != nil {
 t.Fatalf("unexpected error: %v", err)
 }
@@ -119,35 +119,35 @@ func TestAtomicPut(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 ns := framework.CreateTestingNamespace("atomic-put", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-rcBody := api.ReplicationController{
+rcBody := v1.ReplicationController{
 TypeMeta: unversioned.TypeMeta{
 APIVersion: c.Core().RESTClient().APIVersion().String(),
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "atomicrc",
 Namespace: ns.Name,
 Labels: map[string]string{
 "name": "atomicrc",
 },
 },
-Spec: api.ReplicationControllerSpec{
+Spec: v1.ReplicationControllerSpec{
-Replicas: 0,
+Replicas: func(i int32) *int32 { return &i }(0),
 Selector: map[string]string{
 "foo": "bar",
 },
-Template: &api.PodTemplateSpec{
+Template: &v1.PodTemplateSpec{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Labels: map[string]string{
 "foo": "bar",
 },
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {Name: "name", Image: "image"},
 },
 },
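The Replicas change in the hunk above is more than a rename: in the external v1 types, ReplicationControllerSpec.Replicas is a *int32 rather than an int32, so nil can mean "unset, apply the server default" while an explicit count, including zero, must be passed by address; hence the inline function literal. A common helper sketch (the name int32Ptr is illustrative, not from this commit):

// int32Ptr returns a pointer to its argument so an explicit replica
// count (including 0) can be distinguished from an unset (nil) field.
func int32Ptr(i int32) *int32 { return &i }

// usage equivalent to the inline form in the diff:
// Spec: v1.ReplicationControllerSpec{Replicas: int32Ptr(0), ...}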
@@ -211,24 +211,24 @@ func TestPatch(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 ns := framework.CreateTestingNamespace("patch", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

 name := "patchpod"
 resource := "pods"
-podBody := api.Pod{
+podBody := v1.Pod{
 TypeMeta: unversioned.TypeMeta{
 APIVersion: c.Core().RESTClient().APIVersion().String(),
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: name,
 Namespace: ns.Name,
 Labels: map[string]string{},
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {Name: "name", Image: "image"},
 },
 },
@@ -320,20 +320,20 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 ns := framework.CreateTestingNamespace("patch-with-create", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-endpointTemplate := &api.Endpoints{
+endpointTemplate := &v1.Endpoints{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "patchendpoint",
 Namespace: ns.Name,
 },
-Subsets: []api.EndpointSubset{
+Subsets: []v1.EndpointSubset{
 {
-Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}},
+Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
-Ports: []api.EndpointPort{{Port: 80, Protocol: api.ProtocolTCP}},
+Ports: []v1.EndpointPort{{Port: 80, Protocol: v1.ProtocolTCP}},
 },
 },
 }
@@ -431,7 +431,7 @@ func TestAPIVersions(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 clientVersion := c.Core().RESTClient().APIVersion().String()
 g, err := c.Discovery().ServerGroups()
@@ -456,16 +456,16 @@ func TestSingleWatch(t *testing.T) {
 ns := framework.CreateTestingNamespace("single-watch", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

-mkEvent := func(i int) *api.Event {
+mkEvent := func(i int) *v1.Event {
 name := fmt.Sprintf("event-%v", i)
-return &api.Event{
+return &v1.Event{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Namespace: ns.Name,
 Name: name,
 },
-InvolvedObject: api.ObjectReference{
+InvolvedObject: v1.ObjectReference{
 Namespace: ns.Name,
 Name: name,
 },
@@ -517,7 +517,7 @@ func TestSingleWatch(t *testing.T) {
 t.Errorf("Wanted %v, got %v", e, a)
 }
 switch o := got.Object.(type) {
-case *api.Event:
+case *v1.Event:
 if e, a := "event-9", o.Name; e != a {
 t.Errorf("Wanted %v, got %v", e, a)
 }
@@ -541,16 +541,16 @@ func TestMultiWatch(t *testing.T) {
 ns := framework.CreateTestingNamespace("multi-watch", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

-dummyEvent := func(i int) *api.Event {
+dummyEvent := func(i int) *v1.Event {
 name := fmt.Sprintf("unrelated-%v", i)
-return &api.Event{
+return &v1.Event{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
 Namespace: ns.Name,
 },
-InvolvedObject: api.ObjectReference{
+InvolvedObject: v1.ObjectReference{
 Name: name,
 Namespace: ns.Name,
 },
@@ -570,13 +570,13 @@ func TestMultiWatch(t *testing.T) {
 for i := 0; i < watcherCount; i++ {
 watchesStarted.Add(1)
 name := fmt.Sprintf("multi-watch-%v", i)
-got, err := client.Core().Pods(ns.Name).Create(&api.Pod{
+got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: name,
 Labels: labels.Set{"watchlabel": name},
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{{
+Containers: []v1.Container{{
 Name: "pause",
 Image: e2e.GetPauseImageName(client),
 }},
@@ -587,8 +587,8 @@ func TestMultiWatch(t *testing.T) {
 t.Fatalf("Couldn't make %v: %v", name, err)
 }
 go func(name, rv string) {
-options := api.ListOptions{
+options := v1.ListOptions{
-LabelSelector: labels.Set{"watchlabel": name}.AsSelector(),
+LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
 ResourceVersion: rv,
 }
 w, err := client.Core().Pods(ns.Name).Watch(options)
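Another type-level change surfaces in the hunk above: the internal api.ListOptions carries LabelSelector as a typed labels.Selector, while v1.ListOptions carries it as a plain string, so the selector has to be rendered with .String(). A self-contained sketch of that conversion (variable values are illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// Build a typed selector, then render it to the string form that
	// v1.ListOptions.LabelSelector expects.
	sel := labels.Set{"watchlabel": "multi-watch-0"}.AsSelector()
	fmt.Println(sel.String()) // prints: watchlabel=multi-watch-0
}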
@@ -677,12 +677,12 @@ func TestMultiWatch(t *testing.T) {
 return
 }
 name := fmt.Sprintf("unrelated-%v", i)
-_, err := client.Core().Pods(ns.Name).Create(&api.Pod{
+_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: name,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{{
+Containers: []v1.Container{{
 Name: "nothing",
 Image: e2e.GetPauseImageName(client),
 }},
@@ -741,16 +741,16 @@ func TestMultiWatch(t *testing.T) {
 }

 func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
-podBody := api.Pod{
+podBody := v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "selflinktest",
 Namespace: namespace,
 Labels: map[string]string{
 "name": "selflinktest",
 },
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {Name: "name", Image: "image"},
 },
 },
@@ -763,7 +763,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s
 t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
 }

-podList, err := c.Core().Pods(namespace).List(api.ListOptions{})
+podList, err := c.Core().Pods(namespace).List(v1.ListOptions{})
 if err != nil {
 t.Errorf("Failed listing pods: %v", err)
 }
@@ -797,7 +797,7 @@ func TestSelfLinkOnNamespace(t *testing.T) {
 ns := framework.CreateTestingNamespace("selflink", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 runSelfLinkTestOnNamespace(t, c, ns.Name)
 }

@@ -22,12 +22,11 @@ import (
 "reflect"
 "testing"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/client/typed/dynamic"
 "k8s.io/kubernetes/pkg/runtime"
@@ -41,7 +40,7 @@ func TestDynamicClient(t *testing.T) {
 ns := framework.CreateTestingNamespace("dynamic-client", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)

-gv := &registered.GroupOrDie(api.GroupName).GroupVersion
+gv := &registered.GroupOrDie(v1.GroupName).GroupVersion
 config := &restclient.Config{
 Host: s.URL,
 ContentConfig: restclient.ContentConfig{GroupVersion: gv},
@@ -73,12 +72,12 @@ func TestDynamicClient(t *testing.T) {
 }

 // Create a Pod with the normal client
-pod := &api.Pod{
+pod := &v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 GenerateName: "test",
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {
 Name: "test",
 Image: "test-image",
@@ -108,7 +107,7 @@ func TestDynamicClient(t *testing.T) {

 got, err := unstructuredToPod(unstructuredList.Items[0])
 if err != nil {
-t.Fatalf("unexpected error converting Unstructured to api.Pod: %v", err)
+t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
 }

 if !reflect.DeepEqual(actual, got) {
@@ -123,7 +122,7 @@ func TestDynamicClient(t *testing.T) {

 got, err = unstructuredToPod(unstruct)
 if err != nil {
-t.Fatalf("unexpected error converting Unstructured to api.Pod: %v", err)
+t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
 }

 if !reflect.DeepEqual(actual, got) {
@@ -136,7 +135,7 @@ func TestDynamicClient(t *testing.T) {
 t.Fatalf("unexpected error when deleting pod: %v", err)
 }

-list, err := client.Core().Pods(ns.Name).List(api.ListOptions{})
+list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
 if err != nil {
 t.Fatalf("unexpected error when listing pods: %v", err)
 }
@@ -146,12 +145,14 @@ func TestDynamicClient(t *testing.T) {
 }
 }

-func unstructuredToPod(obj *runtime.Unstructured) (*api.Pod, error) {
+func unstructuredToPod(obj *runtime.Unstructured) (*v1.Pod, error) {
 json, err := runtime.Encode(runtime.UnstructuredJSONScheme, obj)
 if err != nil {
 return nil, err
 }
-pod := new(api.Pod)
+pod := new(v1.Pod)
 err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
+pod.Kind = ""
+pod.APIVersion = ""
 return pod, err
 }
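The two lines added at the end of unstructuredToPod deserve a note: decoding with testapi.Default.Codec() leaves Kind and APIVersion populated on the result, whereas pods returned by the typed client carry an empty TypeMeta, so clearing both fields keeps the test's reflect.DeepEqual comparisons meaningful. That is a reading of the diff rather than a statement from the commit; a compact sketch of the idea (the helper name normalizeTypeMeta is illustrative):

package main

import "k8s.io/kubernetes/pkg/api/v1"

// normalizeTypeMeta clears the fields the versioned codec populates
// during decoding, so the decoded object compares equal (via
// reflect.DeepEqual) to a pod returned by the typed client, whose
// TypeMeta is left empty.
func normalizeTypeMeta(pod *v1.Pod) *v1.Pod {
	pod.Kind = ""
	pod.APIVersion = ""
	return pod
}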
@@ -23,9 +23,9 @@ package configmap
 import (
 "testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/test/integration"
 "k8s.io/kubernetes/test/integration/framework"
@@ -36,7 +36,7 @@ func TestConfigMap(t *testing.T) {
 _, s := framework.RunAMaster(nil)
 defer s.Close()

-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 ns := framework.CreateTestingNamespace("config-map", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
@@ -44,9 +44,9 @@ func TestConfigMap(t *testing.T) {
 DoTestConfigMap(t, client, ns)
 }

-func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace) {
+func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
-cfg := api.ConfigMap{
+cfg := v1.ConfigMap{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "configmap",
 Namespace: ns.Name,
 },
@@ -62,22 +62,22 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
 }
 defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)

-pod := &api.Pod{
+pod := &v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "XXX",
 Namespace: ns.Name,
 },
-Spec: api.PodSpec{
+Spec: v1.PodSpec{
-Containers: []api.Container{
+Containers: []v1.Container{
 {
 Name: "fake-name",
 Image: "fakeimage",
-Env: []api.EnvVar{
+Env: []v1.EnvVar{
 {
 Name: "CONFIG_DATA_1",
-ValueFrom: &api.EnvVarSource{
+ValueFrom: &v1.EnvVarSource{
-ConfigMapKeyRef: &api.ConfigMapKeySelector{
+ConfigMapKeyRef: &v1.ConfigMapKeySelector{
-LocalObjectReference: api.LocalObjectReference{
+LocalObjectReference: v1.LocalObjectReference{
 Name: "configmap",
 },
 Key: "data-1",
@@ -86,9 +86,9 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
 },
 {
 Name: "CONFIG_DATA_2",
-ValueFrom: &api.EnvVarSource{
+ValueFrom: &v1.EnvVarSource{
-ConfigMapKeyRef: &api.ConfigMapKeySelector{
+ConfigMapKeyRef: &v1.ConfigMapKeySelector{
-LocalObjectReference: api.LocalObjectReference{
+LocalObjectReference: v1.LocalObjectReference{
 Name: "configmap",
 },
 Key: "data-2",
@@ -96,9 +96,9 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *api.Namespace
 },
 }, {
 Name: "CONFIG_DATA_3",
-ValueFrom: &api.EnvVarSource{
+ValueFrom: &v1.EnvVarSource{
-ConfigMapKeyRef: &api.ConfigMapKeySelector{
+ConfigMapKeyRef: &v1.ConfigMapKeySelector{
-LocalObjectReference: api.LocalObjectReference{
+LocalObjectReference: v1.LocalObjectReference{
 Name: "configmap",
 },
 Key: "data-3",
@@ -32,20 +32,21 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-"k8s.io/kubernetes/pkg/apis/apps"
+apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
-"k8s.io/kubernetes/pkg/apis/autoscaling"
+autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 "k8s.io/kubernetes/pkg/apis/batch"
-"k8s.io/kubernetes/pkg/apis/certificates"
+certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
-"k8s.io/kubernetes/pkg/apis/extensions"
+extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-"k8s.io/kubernetes/pkg/apis/policy"
+policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
-"k8s.io/kubernetes/pkg/apis/rbac"
+rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
-"k8s.io/kubernetes/pkg/apis/storage"
+storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 "k8s.io/kubernetes/pkg/apiserver/authenticator"
 authauthenticator "k8s.io/kubernetes/pkg/auth/authenticator"
 authauthorizer "k8s.io/kubernetes/pkg/auth/authorizer"
 authorizerunion "k8s.io/kubernetes/pkg/auth/authorizer/union"
 "k8s.io/kubernetes/pkg/auth/user"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/client/restclient"
@@ -116,7 +117,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
 // TODO: Allow callers to pipe through a different master url and create a client/start components using it.
 glog.Infof("Master %+v", s.URL)
 // TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
-clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
+clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
 rcStopCh := make(chan struct{})
 controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)

@@ -265,7 +266,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 if masterConfig.EnableCoreControllers {
 // TODO Once /healthz is updated for posthooks, we'll wait for good health
 coreClient := coreclient.NewForConfigOrDie(&cfg)
-svcWatch, err := coreClient.Services(api.NamespaceDefault).Watch(v1.ListOptions{})
+svcWatch, err := coreClient.Services(v1.NamespaceDefault).Watch(v1.ListOptions{})
 if err != nil {
 glog.Fatal(err)
 }
@@ -309,7 +310,7 @@ func NewMasterConfig() *master.Config {

 storageFactory := genericapiserver.NewDefaultStorageFactory(config, runtime.ContentTypeJSON, ns, genericapiserver.NewDefaultResourceEncodingConfig(), master.DefaultAPIResourceConfigSource())
 storageFactory.SetSerializer(
-unversioned.GroupResource{Group: api.GroupName, Resource: genericapiserver.AllResources},
+unversioned.GroupResource{Group: v1.GroupName, Resource: genericapiserver.AllResources},
 "",
 ns)
 storageFactory.SetSerializer(
@@ -389,29 +390,29 @@ func (m *MasterComponents) Stop(apiServer, rcManager bool) {
 }
 }

-func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *api.Namespace {
+func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
 // TODO: Create a namespace with a given basename.
 // Currently we neither create the namespace nor delete all its contents at the end.
 // But as long as tests are not using the same namespaces, this should work fine.
-return &api.Namespace{
+return &v1.Namespace{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 // TODO: Once we start creating namespaces, switch to GenerateName.
 Name: baseName,
 },
 }
 }

-func DeleteTestingNamespace(ns *api.Namespace, apiserver *httptest.Server, t *testing.T) {
+func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
 // TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
 }

 // RCFromManifest reads a .json file and returns the rc in it.
-func RCFromManifest(fileName string) *api.ReplicationController {
+func RCFromManifest(fileName string) *v1.ReplicationController {
 data, err := ioutil.ReadFile(fileName)
 if err != nil {
 glog.Fatalf("Unexpected error reading rc manifest %v", err)
 }
-var controller api.ReplicationController
+var controller v1.ReplicationController
 if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
 glog.Fatalf("Unexpected error reading rc manifest %v", err)
 }
@@ -419,7 +420,7 @@ func RCFromManifest(fileName string) *api.ReplicationController {
 }

 // StopRC stops the rc via kubectl's stop library
-func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
+func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
 reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
 if err != nil || reaper == nil {
 return err
@@ -432,7 +433,7 @@ func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error
 }

 // ScaleRC scales the given rc to the given replicas.
-func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
+func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
 scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
 if err != nil {
 return nil, err
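Notice that StopRC and ScaleRC switch their clientset parameter to internalclientset.Interface rather than to the new versioned clientset: the kubectl reaper and scaler they delegate to still operate on internal types, which is presumably why the import hunk above keeps an explicit internalclientset import alongside the release_1_5 one. A sketch of the resulting split, abridged from the diff (assumes the import aliases from the @@ -32,20 hunk; the elided body is not shown in the hunks):

// StopRC keeps the internal clientset because kubectl.ReaperFor returns
// a reaper built on internal types, even though the rc argument itself
// is now the versioned *v1.ReplicationController.
func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
	if err != nil || reaper == nil {
		return err
	}
	// ... the actual stop call falls outside the hunk and is elided ...
	return nil
}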
@@ -17,9 +17,9 @@ limitations under the License.
 package framework

 import (
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+"k8s.io/kubernetes/pkg/api/v1"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 e2eframework "k8s.io/kubernetes/test/e2e/framework"
 testutils "k8s.io/kubernetes/test/utils"

@@ -51,23 +51,23 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 }

 glog.Infof("Making %d nodes", numNodes)
-baseNode := &api.Node{
+baseNode := &v1.Node{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 GenerateName: p.nodeNamePrefix,
 },
-Spec: api.NodeSpec{
+Spec: v1.NodeSpec{
 // TODO: investigate why this is needed.
 ExternalID: "foo",
 },
-Status: api.NodeStatus{
+Status: v1.NodeStatus{
-Capacity: api.ResourceList{
+Capacity: v1.ResourceList{
-api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
-api.ResourceCPU: resource.MustParse("4"),
+v1.ResourceCPU: resource.MustParse("4"),
-api.ResourceMemory: resource.MustParse("32Gi"),
+v1.ResourceMemory: resource.MustParse("32Gi"),
 },
-Phase: api.NodeRunning,
+Phase: v1.NodeRunning,
-Conditions: []api.NodeCondition{
+Conditions: []v1.NodeCondition{
-{Type: api.NodeReady, Status: api.ConditionTrue},
+{Type: v1.NodeReady, Status: v1.ConditionTrue},
 },
 },
 }
@@ -95,7 +95,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
 nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
 for i := range nodes.Items {
-if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
+if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
 glog.Errorf("Error while deleting Node: %v", err)
 }
 }
@@ -25,9 +25,9 @@ import (
 "net/http/httptest"
 "testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/test/integration/framework"

@@ -108,8 +108,8 @@ func TestApiserverMetrics(t *testing.T) {

 // Make a request to the apiserver to ensure there's at least one data point
 // for the metrics we're expecting -- otherwise, they won't be exported.
-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
-if _, err := client.Core().Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
+if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil {
 t.Fatalf("unexpected error getting pods: %v", err)
 }

@@ -21,10 +21,10 @@ import (

 "github.com/stretchr/testify/assert"
 "golang.org/x/net/context"
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/testapi"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/genericapiserver"
 etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
@@ -38,14 +38,14 @@ func TestIgnoreClusterName(t *testing.T) {
 _, s := framework.RunAMaster(config)
 defer s.Close()

-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 etcdClient := framework.NewEtcdClient()
 etcdStorage := etcdstorage.NewEtcdStorage(etcdClient, testapi.Default.Codec(),
 prefix+"/namespaces/", false, etcdtest.DeserializationCacheSize)
 ctx := context.TODO()

-ns := api.Namespace{
+ns := v1.Namespace{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "test-namespace",
 ClusterName: "cluster-name-to-ignore",
 },
@@ -55,7 +55,7 @@ func TestIgnoreClusterName(t *testing.T) {
 assert.Equal(t, ns.Name, nsNew.Name)
 assert.Empty(t, nsNew.ClusterName)

-nsEtcd := api.Namespace{}
+nsEtcd := v1.Namespace{}
 err = etcdStorage.Get(ctx, ns.Name, &nsEtcd, false)
 assert.Nil(t, err)
 assert.Equal(t, ns.Name, nsEtcd.Name)
@@ -66,7 +66,7 @@ func TestIgnoreClusterName(t *testing.T) {
 assert.Equal(t, ns.Name, nsNew.Name)
 assert.Empty(t, nsNew.ClusterName)

-nsEtcd = api.Namespace{}
+nsEtcd = v1.Namespace{}
 err = etcdStorage.Get(ctx, ns.Name, &nsEtcd, false)
 assert.Nil(t, err)
 assert.Equal(t, ns.Name, nsEtcd.Name)
@@ -27,13 +27,13 @@ import (
 "testing"
 "time"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-"k8s.io/kubernetes/pkg/apis/storage"
+storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
-storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
+storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/restclient"
 fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
@@ -122,15 +122,15 @@ func TestPersistentVolumeRecycler(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 ctrl.Run(stopCh)
 defer close(stopCh)

 // This PV will be claimed, released, and recycled.
-pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRecycle)
+pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
-pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

 _, err := testClient.PersistentVolumes().Create(pv)
 if err != nil {
@@ -145,9 +145,9 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")

 // wait until the controller pairs the volume and claim
-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
 glog.V(2).Infof("TestPersistentVolumeRecycler pv bound")
-waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound")

 // deleting a claim releases the volume, after which it can be recycled
@@ -156,9 +156,9 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 }
 glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted")

-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeReleased)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
 glog.V(2).Infof("TestPersistentVolumeRecycler pv released")
-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeAvailable)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeAvailable)
 glog.V(2).Infof("TestPersistentVolumeRecycler pv available")
 }

@@ -176,15 +176,15 @@ func TestPersistentVolumeDeleter(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 ctrl.Run(stopCh)
 defer close(stopCh)

 // This PV will be claimed, released, and deleted.
-pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimDelete)
+pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
-pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})

 _, err := testClient.PersistentVolumes().Create(pv)
 if err != nil {
@@ -196,9 +196,9 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
 }
 glog.V(2).Infof("TestPersistentVolumeDeleter pvc created")
-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
 glog.V(2).Infof("TestPersistentVolumeDeleter pv bound")
-waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound")

 // deleting a claim releases the volume, after which it can be recycled
@@ -207,7 +207,7 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 }
 glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted")

-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeReleased)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
 glog.V(2).Infof("TestPersistentVolumeDeleter pv released")

 for {
@@ -235,22 +235,22 @@ func TestPersistentVolumeBindRace(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 ctrl.Run(stopCh)
 defer close(stopCh)

-pv := createPV("fake-pv-race", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
+pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
-pvc := createPVC("fake-pvc-race", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
 counter := 0
 maxClaims := 100
-claims := []*api.PersistentVolumeClaim{}
+claims := []*v1.PersistentVolumeClaim{}
 for counter <= maxClaims {
 counter += 1
 clone, _ := conversion.NewCloner().DeepCopy(pvc)
-newPvc, _ := clone.(*api.PersistentVolumeClaim)
+newPvc, _ := clone.(*v1.PersistentVolumeClaim)
-newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
+newPvc.ObjectMeta = v1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
 claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
 if err != nil {
 t.Fatalf("Error creating newPvc: %v", err)
@@ -262,7 +262,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 // putting a bind manually on a pv should only match the claim it is bound to
 rand.Seed(time.Now().Unix())
 claim := claims[rand.Intn(maxClaims-1)]
-claimRef, err := api.GetReference(claim)
+claimRef, err := v1.GetReference(claim)
 if err != nil {
 t.Fatalf("Unexpected error getting claimRef: %v", err)
 }
@@ -275,9 +275,9 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 }
 glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name)

-waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
+waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
 glog.V(2).Infof("TestPersistentVolumeBindRace pv bound")
-waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
+waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
 glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")

 pv, err = testClient.PersistentVolumes().Get(pv.Name)
@@ -306,7 +306,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 controller.Run(stopCh)
@@ -314,8 +314,8 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {

 var (
 err error
-modes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
+modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
-reclaim = api.PersistentVolumeReclaimRetain
+reclaim = v1.PersistentVolumeReclaimRetain

 pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
 pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
@@ -347,9 +347,9 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 }
 t.Log("claim created")

-waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
 t.Log("volume bound")
-waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 t.Log("claim bound")

 pv, err := testClient.PersistentVolumes().Get("pv-false")
@@ -386,7 +386,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 controller.Run(stopCh)
@@ -394,8 +394,8 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {

 var (
 err error
-modes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
+modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
-reclaim = api.PersistentVolumeReclaimRetain
+reclaim = v1.PersistentVolumeReclaimRetain

 pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
 pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
@@ -446,9 +446,9 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 }
 t.Log("claim created")

-waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
+waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
 t.Log("volume bound")
-waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
+waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
 t.Log("claim bound")

 pv, err := testClient.PersistentVolumes().Get("pv-false")
@@ -485,28 +485,28 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {

 // NOTE: This test cannot run in parallel, because it is creating and deleting
 // non-namespaced objects (PersistenceVolumes).
-defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})

 stopCh := make(chan struct{})
 controller.Run(stopCh)
 defer close(stopCh)

 maxPVs := getObjectCount()
-pvs := make([]*api.PersistentVolume, maxPVs)
+pvs := make([]*v1.PersistentVolume, maxPVs)
 for i := 0; i < maxPVs; i++ {
 // This PV will be claimed, released, and deleted
 pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), strconv.Itoa(i)+"G",
|
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), strconv.Itoa(i)+"G",
|
||||||
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
|
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
|
||||||
}
|
}
|
||||||
|
|
||||||
pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
|
pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
|
||||||
|
|
||||||
for i := 0; i < maxPVs; i++ {
|
for i := 0; i < maxPVs; i++ {
|
||||||
_, err := testClient.PersistentVolumes().Create(pvs[i])
|
_, err := testClient.PersistentVolumes().Create(pvs[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Failed to create PersistentVolume %d: %v", i, err)
|
t.Errorf("Failed to create PersistentVolume %d: %v", i, err)
|
||||||
}
|
}
|
||||||
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, api.VolumeAvailable)
|
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeAvailable)
|
||||||
}
|
}
|
||||||
t.Log("volumes created")
|
t.Log("volumes created")
|
||||||
|
|
||||||
@@ -517,9 +517,9 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
|
|||||||
t.Log("claim created")
|
t.Log("claim created")
|
||||||
|
|
||||||
// wait until the binder pairs the claim with a volume
|
// wait until the binder pairs the claim with a volume
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
|
||||||
t.Log("volume bound")
|
t.Log("volume bound")
|
||||||
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
|
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
|
||||||
t.Log("claim bound")
|
t.Log("claim bound")
|
||||||
|
|
||||||
// only one PV is bound
|
// only one PV is bound
|
||||||
@@ -533,14 +533,14 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// found a bounded PV
|
// found a bounded PV
|
||||||
p := pv.Spec.Capacity[api.ResourceStorage]
|
p := pv.Spec.Capacity[v1.ResourceStorage]
|
||||||
pvCap := p.Value()
|
pvCap := p.Value()
|
||||||
expectedCap := resource.MustParse(strconv.Itoa(maxPVs/2) + "G")
|
expectedCap := resource.MustParse(strconv.Itoa(maxPVs/2) + "G")
|
||||||
expectedCapVal := expectedCap.Value()
|
expectedCapVal := expectedCap.Value()
|
||||||
if pv.Spec.ClaimRef.Name != pvc.Name || pvCap != expectedCapVal {
|
if pv.Spec.ClaimRef.Name != pvc.Name || pvCap != expectedCapVal {
|
||||||
t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
|
t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
|
||||||
}
|
}
|
||||||
t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[api.ResourceStorage])
|
t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage])
|
||||||
bound += 1
|
bound += 1
|
||||||
}
|
}
|
||||||
t.Log("volumes checked")
|
t.Log("volumes checked")
|
||||||
@@ -555,7 +555,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
t.Log("claim deleted")
|
t.Log("claim deleted")
|
||||||
|
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeReleased)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
|
||||||
t.Log("volumes released")
|
t.Log("volumes released")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -574,20 +574,20 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
|
|||||||
|
|
||||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||||
// non-namespaced objects (PersistenceVolumes).
|
// non-namespaced objects (PersistenceVolumes).
|
||||||
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
|
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
|
||||||
|
|
||||||
controllerStopCh := make(chan struct{})
|
controllerStopCh := make(chan struct{})
|
||||||
binder.Run(controllerStopCh)
|
binder.Run(controllerStopCh)
|
||||||
defer close(controllerStopCh)
|
defer close(controllerStopCh)
|
||||||
|
|
||||||
objCount := getObjectCount()
|
objCount := getObjectCount()
|
||||||
pvs := make([]*api.PersistentVolume, objCount)
|
pvs := make([]*v1.PersistentVolume, objCount)
|
||||||
pvcs := make([]*api.PersistentVolumeClaim, objCount)
|
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
// This PV will be claimed, released, and deleted
|
// This PV will be claimed, released, and deleted
|
||||||
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G",
|
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G",
|
||||||
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
|
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
|
||||||
pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
|
pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create PVs first
|
// Create PVs first
|
||||||
@@ -603,7 +603,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
// Wait for them to get Available
|
// Wait for them to get Available
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeAvailable)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeAvailable)
|
||||||
glog.V(1).Infof("%d volumes available", i+1)
|
glog.V(1).Infof("%d volumes available", i+1)
|
||||||
}
|
}
|
||||||
glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available")
|
glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available")
|
||||||
@@ -643,7 +643,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
|
|||||||
// Modify PVC
|
// Modify PVC
|
||||||
i := rand.Intn(objCount)
|
i := rand.Intn(objCount)
|
||||||
name := "pvc-" + strconv.Itoa(i)
|
name := "pvc-" + strconv.Itoa(i)
|
||||||
pvc, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Get(name)
|
pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Silently ignore error, the PVC may have be already
|
// Silently ignore error, the PVC may have be already
|
||||||
// deleted or not exists yet.
|
// deleted or not exists yet.
|
||||||
@@ -655,7 +655,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
|
pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
|
||||||
}
|
}
|
||||||
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Update(pvc)
|
_, err = testClient.PersistentVolumeClaims(v1.NamespaceDefault).Update(pvc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Silently ignore error, the PVC may have been updated by
|
// Silently ignore error, the PVC may have been updated by
|
||||||
// the controller.
|
// the controller.
|
||||||
@@ -684,12 +684,12 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
|
|||||||
|
|
||||||
// wait until the binder pairs all claims
|
// wait until the binder pairs all claims
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
|
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
|
||||||
glog.V(1).Infof("%d claims bound", i+1)
|
glog.V(1).Infof("%d claims bound", i+1)
|
||||||
}
|
}
|
||||||
// wait until the binder pairs all volumes
|
// wait until the binder pairs all volumes
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, api.VolumeBound)
|
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeBound)
|
||||||
glog.V(1).Infof("%d claims bound", i+1)
|
glog.V(1).Infof("%d claims bound", i+1)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -738,13 +738,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
defer watchPVC.Stop()
|
defer watchPVC.Stop()
|
||||||
|
|
||||||
// Create *bound* volumes and PVCs
|
// Create *bound* volumes and PVCs
|
||||||
pvs := make([]*api.PersistentVolume, objCount)
|
pvs := make([]*v1.PersistentVolume, objCount)
|
||||||
pvcs := make([]*api.PersistentVolumeClaim, objCount)
|
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
pvName := "pv-startup-" + strconv.Itoa(i)
|
pvName := "pv-startup-" + strconv.Itoa(i)
|
||||||
pvcName := "pvc-startup-" + strconv.Itoa(i)
|
pvcName := "pvc-startup-" + strconv.Itoa(i)
|
||||||
|
|
||||||
pvc := createPVC(pvcName, ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
|
pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
|
||||||
pvc.Annotations = map[string]string{"annBindCompleted": ""}
|
pvc.Annotations = map[string]string{"annBindCompleted": ""}
|
||||||
pvc.Spec.VolumeName = pvName
|
pvc.Spec.VolumeName = pvName
|
||||||
newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
|
newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
|
||||||
@@ -752,7 +752,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
|
t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
|
||||||
}
|
}
|
||||||
// Save Bound status as a separate transaction
|
// Save Bound status as a separate transaction
|
||||||
newPVC.Status.Phase = api.ClaimBound
|
newPVC.Status.Phase = v1.ClaimBound
|
||||||
newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
|
newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
|
t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
|
||||||
@@ -761,11 +761,11 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
// Drain watchPVC with all events generated by the PVC until it's bound
|
// Drain watchPVC with all events generated by the PVC until it's bound
|
||||||
// We don't want to catch "PVC craated with Status.Phase == Pending"
|
// We don't want to catch "PVC craated with Status.Phase == Pending"
|
||||||
// later in this test.
|
// later in this test.
|
||||||
waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
|
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
|
||||||
|
|
||||||
pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
|
pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
|
||||||
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
|
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
|
||||||
claimRef, err := api.GetReference(newPVC)
|
claimRef, err := v1.GetReference(newPVC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
|
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
|
||||||
return
|
return
|
||||||
@@ -776,7 +776,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
|
t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
|
||||||
}
|
}
|
||||||
// Save Bound status as a separate transaction
|
// Save Bound status as a separate transaction
|
||||||
newPV.Status.Phase = api.VolumeBound
|
newPV.Status.Phase = v1.VolumeBound
|
||||||
newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
|
newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
|
t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
|
||||||
@@ -785,7 +785,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
// Drain watchPV with all events generated by the PV until it's bound
|
// Drain watchPV with all events generated by the PV until it's bound
|
||||||
// We don't want to catch "PV craated with Status.Phase == Pending"
|
// We don't want to catch "PV craated with Status.Phase == Pending"
|
||||||
// later in this test.
|
// later in this test.
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start the controller when all PVs and PVCs are already saved in etcd
|
// Start the controller when all PVs and PVCs are already saved in etcd
|
||||||
@@ -801,20 +801,20 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
|
|||||||
for !finished {
|
for !finished {
|
||||||
select {
|
select {
|
||||||
case volumeEvent := <-watchPV.ResultChan():
|
case volumeEvent := <-watchPV.ResultChan():
|
||||||
volume, ok := volumeEvent.Object.(*api.PersistentVolume)
|
volume, ok := volumeEvent.Object.(*v1.PersistentVolume)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if volume.Status.Phase != api.VolumeBound {
|
if volume.Status.Phase != v1.VolumeBound {
|
||||||
t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
|
t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
|
||||||
}
|
}
|
||||||
|
|
||||||
case claimEvent := <-watchPVC.ResultChan():
|
case claimEvent := <-watchPVC.ResultChan():
|
||||||
claim, ok := claimEvent.Object.(*api.PersistentVolumeClaim)
|
claim, ok := claimEvent.Object.(*v1.PersistentVolumeClaim)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if claim.Status.Phase != api.ClaimBound {
|
if claim.Status.Phase != v1.ClaimBound {
|
||||||
t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
|
t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -862,14 +862,14 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
|||||||
|
|
||||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||||
// non-namespaced objects (PersistenceVolumes and StorageClasses).
|
// non-namespaced objects (PersistenceVolumes and StorageClasses).
|
||||||
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
|
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
|
||||||
defer testClient.Storage().StorageClasses().DeleteCollection(nil, api.ListOptions{})
|
defer testClient.Storage().StorageClasses().DeleteCollection(nil, v1.ListOptions{})
|
||||||
|
|
||||||
storageClass := storage.StorageClass{
|
storageClass := storage.StorageClass{
|
||||||
TypeMeta: unversioned.TypeMeta{
|
TypeMeta: unversioned.TypeMeta{
|
||||||
Kind: "StorageClass",
|
Kind: "StorageClass",
|
||||||
},
|
},
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Name: "gold",
|
Name: "gold",
|
||||||
},
|
},
|
||||||
Provisioner: provisionerPluginName,
|
Provisioner: provisionerPluginName,
|
||||||
@@ -881,9 +881,9 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
|||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
|
|
||||||
objCount := getObjectCount()
|
objCount := getObjectCount()
|
||||||
pvcs := make([]*api.PersistentVolumeClaim, objCount)
|
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
|
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
|
||||||
pvc.Annotations = map[string]string{
|
pvc.Annotations = map[string]string{
|
||||||
storageutil.StorageClassAnnotation: "gold",
|
storageutil.StorageClassAnnotation: "gold",
|
||||||
}
|
}
|
||||||
@@ -901,13 +901,13 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
|||||||
|
|
||||||
// Wait until the controller provisions and binds all of them
|
// Wait until the controller provisions and binds all of them
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
|
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
|
||||||
glog.V(1).Infof("%d claims bound", i+1)
|
glog.V(1).Infof("%d claims bound", i+1)
|
||||||
}
|
}
|
||||||
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
|
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
|
||||||
|
|
||||||
// check that we have enough bound PVs
|
// check that we have enough bound PVs
|
||||||
pvList, err := testClient.PersistentVolumes().List(api.ListOptions{})
|
pvList, err := testClient.PersistentVolumes().List(v1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to list volumes: %s", err)
|
t.Fatalf("Failed to list volumes: %s", err)
|
||||||
}
|
}
|
||||||
@@ -916,7 +916,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
pv := &pvList.Items[i]
|
pv := &pvList.Items[i]
|
||||||
if pv.Status.Phase != api.VolumeBound {
|
if pv.Status.Phase != v1.VolumeBound {
|
||||||
t.Fatalf("Expected volume %s to be bound, is %s instead", pv.Name, pv.Status.Phase)
|
t.Fatalf("Expected volume %s to be bound, is %s instead", pv.Name, pv.Status.Phase)
|
||||||
}
|
}
|
||||||
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
|
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
|
||||||
@@ -930,7 +930,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
|
|||||||
// Wait for the PVs to get deleted by listing remaining volumes
|
// Wait for the PVs to get deleted by listing remaining volumes
|
||||||
// (delete events were unreliable)
|
// (delete events were unreliable)
|
||||||
for {
|
for {
|
||||||
volumes, err := testClient.PersistentVolumes().List(api.ListOptions{})
|
volumes, err := testClient.PersistentVolumes().List(v1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to list volumes: %v", err)
|
t.Fatalf("Failed to list volumes: %v", err)
|
||||||
}
|
}
|
||||||
@@ -959,7 +959,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
|||||||
|
|
||||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||||
// non-namespaced objects (PersistenceVolumes).
|
// non-namespaced objects (PersistenceVolumes).
|
||||||
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
|
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
|
||||||
|
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
controller.Run(stopCh)
|
controller.Run(stopCh)
|
||||||
@@ -967,11 +967,11 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
|||||||
|
|
||||||
// This PV will be claimed, released, and deleted
|
// This PV will be claimed, released, and deleted
|
||||||
pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
|
pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
|
||||||
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
|
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
|
||||||
pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
|
pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
|
||||||
[]api.PersistentVolumeAccessMode{api.ReadWriteMany}, api.PersistentVolumeReclaimRetain)
|
[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, v1.PersistentVolumeReclaimRetain)
|
||||||
|
|
||||||
pvc := createPVC("pvc-rwm", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteMany})
|
pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany})
|
||||||
|
|
||||||
_, err := testClient.PersistentVolumes().Create(pv_rwm)
|
_, err := testClient.PersistentVolumes().Create(pv_rwm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -990,9 +990,9 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
|||||||
t.Log("claim created")
|
t.Log("claim created")
|
||||||
|
|
||||||
// wait until the controller pairs the volume and claim
|
// wait until the controller pairs the volume and claim
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
|
||||||
t.Log("volume bound")
|
t.Log("volume bound")
|
||||||
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
|
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
|
||||||
t.Log("claim bound")
|
t.Log("claim bound")
|
||||||
|
|
||||||
// only RWM PV is bound
|
// only RWM PV is bound
|
||||||
@@ -1020,11 +1020,11 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
t.Log("claim deleted")
|
t.Log("claim deleted")
|
||||||
|
|
||||||
waitForAnyPersistentVolumePhase(watchPV, api.VolumeReleased)
|
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
|
||||||
t.Log("volume released")
|
t.Log("volume released")
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase api.PersistentVolumePhase) {
|
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
|
||||||
// Check if the volume is already in requested phase
|
// Check if the volume is already in requested phase
|
||||||
volume, err := client.Core().PersistentVolumes().Get(pvName)
|
volume, err := client.Core().PersistentVolumes().Get(pvName)
|
||||||
if err == nil && volume.Status.Phase == phase {
|
if err == nil && volume.Status.Phase == phase {
|
||||||
@@ -1034,7 +1034,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
|
|||||||
// Wait for the phase
|
// Wait for the phase
|
||||||
for {
|
for {
|
||||||
event := <-w.ResultChan()
|
event := <-w.ResultChan()
|
||||||
volume, ok := event.Object.(*api.PersistentVolume)
|
volume, ok := event.Object.(*v1.PersistentVolume)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -1045,7 +1045,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase api.PersistentVolumeClaimPhase) {
|
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
|
||||||
// Check if the claim is already in requested phase
|
// Check if the claim is already in requested phase
|
||||||
claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
|
claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
|
||||||
if err == nil && claim.Status.Phase == phase {
|
if err == nil && claim.Status.Phase == phase {
|
||||||
@@ -1055,7 +1055,7 @@ func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, n
|
|||||||
// Wait for the phase
|
// Wait for the phase
|
||||||
for {
|
for {
|
||||||
event := <-w.ResultChan()
|
event := <-w.ResultChan()
|
||||||
claim, ok := event.Object.(*api.PersistentVolumeClaim)
|
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -1066,10 +1066,10 @@ func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, n
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForAnyPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumePhase) {
|
func waitForAnyPersistentVolumePhase(w watch.Interface, phase v1.PersistentVolumePhase) {
|
||||||
for {
|
for {
|
||||||
event := <-w.ResultChan()
|
event := <-w.ResultChan()
|
||||||
volume, ok := event.Object.(*api.PersistentVolume)
|
volume, ok := event.Object.(*v1.PersistentVolume)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -1080,10 +1080,10 @@ func waitForAnyPersistentVolumePhase(w watch.Interface, phase api.PersistentVolu
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase api.PersistentVolumeClaimPhase) {
|
func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
|
||||||
for {
|
for {
|
||||||
event := <-w.ResultChan()
|
event := <-w.ResultChan()
|
||||||
claim, ok := event.Object.(*api.PersistentVolumeClaim)
|
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -1094,18 +1094,18 @@ func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase api.Persisten
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
|
func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
|
||||||
// Use higher QPS and Burst, there is a test for race conditions which
|
// Use higher QPS and Burst, there is a test for race conditions which
|
||||||
// creates many objects and default values were too low.
|
// creates many objects and default values were too low.
|
||||||
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
|
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
|
||||||
Host: s.URL,
|
Host: s.URL,
|
||||||
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion},
|
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion},
|
||||||
QPS: 1000000,
|
QPS: 1000000,
|
||||||
Burst: 1000000,
|
Burst: 1000000,
|
||||||
})
|
})
|
||||||
testClient := clientset.NewForConfigOrDie(&restclient.Config{
|
testClient := clientset.NewForConfigOrDie(&restclient.Config{
|
||||||
Host: s.URL,
|
Host: s.URL,
|
||||||
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion},
|
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion},
|
||||||
QPS: 1000000,
|
QPS: 1000000,
|
||||||
Burst: 1000000,
|
Burst: 1000000,
|
||||||
})
|
})
|
||||||
@@ -1134,11 +1134,11 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
|
|||||||
EnableDynamicProvisioning: true,
|
EnableDynamicProvisioning: true,
|
||||||
})
|
})
|
||||||
|
|
||||||
watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
|
watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
|
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
|
||||||
}
|
}
|
||||||
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
|
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
|
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
|
||||||
}
|
}
|
||||||
@@ -1146,26 +1146,26 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
|
|||||||
return testClient, ctrl, watchPV, watchPVC
|
return testClient, ctrl, watchPV, watchPVC
|
||||||
}
|
}
|
||||||
|
|
||||||
func createPV(name, path, cap string, mode []api.PersistentVolumeAccessMode, reclaim api.PersistentVolumeReclaimPolicy) *api.PersistentVolume {
|
func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
|
||||||
return &api.PersistentVolume{
|
return &v1.PersistentVolume{
|
||||||
ObjectMeta: api.ObjectMeta{Name: name},
|
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||||
Spec: api.PersistentVolumeSpec{
|
Spec: v1.PersistentVolumeSpec{
|
||||||
PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: path}},
|
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: path}},
|
||||||
Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(cap)},
|
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)},
|
||||||
AccessModes: mode,
|
AccessModes: mode,
|
||||||
PersistentVolumeReclaimPolicy: reclaim,
|
PersistentVolumeReclaimPolicy: reclaim,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createPVC(name, namespace, cap string, mode []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
|
func createPVC(name, namespace, cap string, mode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
|
||||||
return &api.PersistentVolumeClaim{
|
return &v1.PersistentVolumeClaim{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Name: name,
|
Name: name,
|
||||||
Namespace: namespace,
|
Namespace: namespace,
|
||||||
},
|
},
|
||||||
Spec: api.PersistentVolumeClaimSpec{
|
Spec: v1.PersistentVolumeClaimSpec{
|
||||||
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(cap)}},
|
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)}},
|
||||||
AccessModes: mode,
|
AccessModes: mode,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
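Every hunk above applies one mechanical pattern: the tests move from the internal API types (api.*) to the versioned types (v1.*) and build their client from the generated release_1_5 clientset. A minimal, self-contained sketch of that pattern follows, using only calls that appear in this diff; the host URL is a placeholder and not part of the commit.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// The versioned clientset is configured exactly as in the tests above:
	// the group version is resolved via v1.GroupName instead of api.GroupName.
	c := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          "http://127.0.0.1:8080", // placeholder, not from the commit
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
	})

	// List and watch options are the versioned v1.ListOptions after the change.
	pvs, err := c.Core().PersistentVolumes().List(v1.ListOptions{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%d persistent volumes\n", len(pvs.Items))
}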
@@ -22,9 +22,9 @@ import (
 	"fmt"
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -37,7 +37,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
 	ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 
 	var (
 		iZero = int64(0)
@@ -46,13 +46,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
 		iNeg  = int64(-1)
 	)
 
-	prototypePod := func() *api.Pod {
-		return &api.Pod{
-			ObjectMeta: api.ObjectMeta{
+	prototypePod := func() *v1.Pod {
+		return &v1.Pod{
+			ObjectMeta: v1.ObjectMeta{
 				Name: "xxx",
 			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
 					{
 						Name:  "fake-name",
 						Image: "fakeimage",
@@ -155,18 +155,18 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
 	ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 
-	pod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	pod := &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "xxx",
 		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
 				{
 					Name:  "fake-name",
 					Image: "fakeimage",
-					SecurityContext: &api.SecurityContext{
+					SecurityContext: &v1.SecurityContext{
 						ReadOnlyRootFilesystem: &isReadOnly,
 					},
 				},
@@ -28,8 +28,10 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
@@ -63,8 +65,9 @@ func TestQuota(t *testing.T) {
 	defer s.Close()
 
 	admissionCh := make(chan struct{})
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	admission, err := resourcequota.NewResourceQuota(clientset, quotainstall.NewRegistry(clientset, nil), 5, admissionCh)
+	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
+	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
+	admission, err := resourcequota.NewResourceQuota(internalClientset, quotainstall.NewRegistry(nil, nil), 5, admissionCh)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -104,14 +107,14 @@ func TestQuota(t *testing.T) {
 	endTime := time.Now()
 	t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
 
-	quota := &api.ResourceQuota{
-		ObjectMeta: api.ObjectMeta{
+	quota := &v1.ResourceQuota{
+		ObjectMeta: v1.ObjectMeta{
 			Name:      "quota",
 			Namespace: ns.Name,
 		},
-		Spec: api.ResourceQuotaSpec{
-			Hard: api.ResourceList{
-				api.ResourcePods: resource.MustParse("1000"),
+		Spec: v1.ResourceQuotaSpec{
+			Hard: v1.ResourceList{
+				v1.ResourcePods: resource.MustParse("1000"),
 			},
 		},
 	}
@@ -123,8 +126,8 @@ func TestQuota(t *testing.T) {
 	t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
 }
 
-func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.Clientset) {
-	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(api.SingleObject(api.ObjectMeta{Name: quota.Name}))
+func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
+	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -141,7 +144,7 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
 	}
 
 	switch cast := event.Object.(type) {
-	case *api.ResourceQuota:
+	case *v1.ResourceQuota:
 		if len(cast.Status.Hard) > 0 {
 			return true, nil
 		}
@@ -155,23 +158,23 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
 }
 
 func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
-	target := 100
-	rc := &api.ReplicationController{
-		ObjectMeta: api.ObjectMeta{
+	target := int32(100)
+	rc := &v1.ReplicationController{
+		ObjectMeta: v1.ObjectMeta{
 			Name:      "foo",
 			Namespace: namespace,
 		},
-		Spec: api.ReplicationControllerSpec{
-			Replicas: int32(target),
+		Spec: v1.ReplicationControllerSpec{
+			Replicas: &target,
 			Selector: map[string]string{"foo": "bar"},
-			Template: &api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{
+			Template: &v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{
 					Labels: map[string]string{
 						"foo": "bar",
 					},
 				},
-				Spec: api.PodSpec{
-					Containers: []api.Container{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
 						{
 							Name:  "container",
 							Image: "busybox",
@@ -182,7 +185,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 		},
 	}
 
-	w, err := clientset.Core().ReplicationControllers(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: rc.Name}))
+	w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -199,9 +202,9 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 	}
 
 	switch cast := event.Object.(type) {
-	case *api.ReplicationController:
+	case *v1.ReplicationController:
 		fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
-		if int(cast.Status.Replicas) == target {
+		if cast.Status.Replicas == target {
 			return true, nil
 		}
 	}
@@ -209,7 +212,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 		return false, nil
 	})
 	if err != nil {
-		pods, _ := clientset.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()})
+		pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
 		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
 	}
 }
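Two of the quota changes above are more than a package rename and are easy to miss: in the versioned API, ReplicationControllerSpec.Replicas is a pointer, and ListOptions carries selectors as plain strings. A small sketch of both, exercising only the types and calls that appear in this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// v1.ReplicationControllerSpec.Replicas is *int32, so scale() now takes
	// the address of a local variable instead of passing a plain value.
	target := int32(100)
	spec := v1.ReplicationControllerSpec{Replicas: &target}

	// Status.Replicas stays a plain int32, which is why the watch loop can
	// drop its int() conversion and compare directly.
	status := v1.ReplicationControllerStatus{Replicas: 100}
	fmt.Println(status.Replicas == *spec.Replicas) // true

	// v1.ListOptions selectors are strings, hence the added .String() calls.
	opts := v1.ListOptions{
		LabelSelector: labels.Everything().String(),
		FieldSelector: fields.Everything().String(),
	}
	fmt.Printf("label=%q field=%q\n", opts.LabelSelector, opts.FieldSelector)
}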
@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller/informers"
@@ -137,12 +136,12 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
 		t.Fatalf("Error in create clientset: %v", err)
 	}
 	resyncPeriod := 12 * time.Hour
-	informers := informers.NewSharedInformerFactory(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod)
+	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
 
 	rm := replicaset.NewReplicaSetController(
 		informers.ReplicaSets(),
 		informers.Pods(),
-		internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
+		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
 		replicaset.BurstReplicas,
 		4096,
 		enableGarbageCollector,
@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller/informers"
@@ -137,10 +136,10 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
 	resyncPeriodFunc := func() time.Duration {
 		return resyncPeriod
 	}
-	podInformer := informers.NewPodInformer(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
+	podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
 	rm := replication.NewReplicationManager(
 		podInformer,
-		internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
+		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
 		resyncPeriodFunc,
 		replication.BurstReplicas,
 		4096,
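The two rmSetup hunks above wire both the informers and the controllers to the versioned clientset. A sketch of the resulting setup follows, mirroring the calls exactly as they appear in the diff; the host and the garbage-collector flag are placeholders, the import paths follow the package names used in the diff, and the nil second argument to NewSharedInformerFactory simply reproduces the updated call above.

package main

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/controller/informers"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func main() {
	// Placeholder config; the tests obtain theirs from the test framework.
	config := restclient.Config{Host: "http://127.0.0.1:8080"}
	resyncPeriod := 12 * time.Hour

	// The informer factory and the controller now share the versioned
	// clientset, each with its own user agent as in the diff.
	informerFactory := informers.NewSharedInformerFactory(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")),
		nil,
		resyncPeriod,
	)

	rm := replicaset.NewReplicaSetController(
		informerFactory.ReplicaSets(),
		informerFactory.Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
		replicaset.BurstReplicas,
		4096,
		false, // enableGarbageCollector; a placeholder value
	)
	_ = rm
}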
@@ -29,12 +29,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
|
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/record"
|
"k8s.io/kubernetes/pkg/client/record"
|
||||||
"k8s.io/kubernetes/pkg/client/restclient"
|
"k8s.io/kubernetes/pkg/client/restclient"
|
||||||
"k8s.io/kubernetes/pkg/util/wait"
|
"k8s.io/kubernetes/pkg/util/wait"
|
||||||
@@ -51,8 +51,8 @@ const (
|
|||||||
prioritize = "prioritize"
|
prioritize = "prioritize"
|
||||||
)
|
)
|
||||||
|
|
||||||
type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error)
|
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
|
||||||
type priorityFunc func(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error)
|
type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
|
||||||
|
|
||||||
type priorityConfig struct {
|
type priorityConfig struct {
|
||||||
function priorityFunc
|
function priorityFunc
|
||||||
@@ -104,15 +104,15 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, schedulerapi.FailedNodesMap, error) {
|
func (e *Extender) Filter(pod *v1.Pod, nodes *v1.NodeList) (*v1.NodeList, schedulerapi.FailedNodesMap, error) {
|
||||||
filtered := []api.Node{}
|
filtered := []v1.Node{}
|
||||||
failedNodesMap := schedulerapi.FailedNodesMap{}
|
failedNodesMap := schedulerapi.FailedNodesMap{}
|
||||||
for _, node := range nodes.Items {
|
for _, node := range nodes.Items {
|
||||||
fits := true
|
fits := true
|
||||||
for _, predicate := range e.predicates {
|
for _, predicate := range e.predicates {
|
||||||
fit, err := predicate(pod, &node)
|
fit, err := predicate(pod, &node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &api.NodeList{}, schedulerapi.FailedNodesMap{}, err
|
return &v1.NodeList{}, schedulerapi.FailedNodesMap{}, err
|
||||||
}
|
}
|
||||||
if !fit {
|
if !fit {
|
||||||
fits = false
|
fits = false
|
||||||
@@ -125,10 +125,10 @@ func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, sch
|
|||||||
failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
|
failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return &api.NodeList{Items: filtered}, failedNodesMap, nil
|
return &v1.NodeList{Items: filtered}, failedNodesMap, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
|
func (e *Extender) Prioritize(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||||
result := schedulerapi.HostPriorityList{}
|
result := schedulerapi.HostPriorityList{}
|
||||||
combinedScores := map[string]int{}
|
combinedScores := map[string]int{}
|
||||||
for _, prioritizer := range e.prioritizers {
|
for _, prioritizer := range e.prioritizers {
|
||||||
@@ -151,21 +151,21 @@ func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.
|
|||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func machine_1_2_3_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
|
func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||||
if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
|
if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func machine_2_3_5_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
|
func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||||
if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
|
if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
|
func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||||
result := schedulerapi.HostPriorityList{}
|
result := schedulerapi.HostPriorityList{}
|
||||||
for _, node := range nodes.Items {
|
for _, node := range nodes.Items {
|
||||||
score := 1
|
score := 1
|
||||||
@@ -177,7 +177,7 @@ func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.Hos
|
|||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||

-func machine_3_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
@@ -196,7 +196,7 @@ func TestSchedulerExtender(t *testing.T) {
 	ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)

-	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 	extender1 := &Extender{
 		name: "extender1",
@@ -236,16 +236,16 @@ func TestSchedulerExtender(t *testing.T) {
 			},
 		},
 	}
-	policy.APIVersion = registered.GroupOrDie(api.GroupName).GroupVersion.String()
+	policy.APIVersion = registered.GroupOrDie(v1.GroupName).GroupVersion.String()

-	schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
 	scheduler.New(schedulerConfig).Run()

 	defer close(schedulerConfig.StopEverything)
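The recorder/sink wiring above recurs in nearly every test this commit touches; in isolation, the post-migration pattern looks like this sketch (under this file's imports; startScheduler is a hypothetical name):

    // startScheduler shows the v1 wiring: events are recorded through the
    // versioned core client (v1core) instead of the internal-version client.
    func startScheduler(clientSet clientset.Interface, cfg *scheduler.Config) {
        eventBroadcaster := record.NewBroadcaster()
        cfg.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
        eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
        scheduler.New(cfg).Run()
    }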
@@ -253,24 +253,24 @@ func TestSchedulerExtender(t *testing.T) {
 	DoTestPodScheduling(ns, t, clientSet)
 }

-func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface) {
+func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
+	defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})

-	goodCondition := api.NodeCondition{
+	goodCondition := v1.NodeCondition{
-		Type:              api.NodeReady,
+		Type:              v1.NodeReady,
-		Status:            api.ConditionTrue,
+		Status:            v1.ConditionTrue,
 		Reason:            fmt.Sprintf("schedulable condition"),
 		LastHeartbeatTime: unversioned.Time{time.Now()},
 	}
-	node := &api.Node{
+	node := &v1.Node{
-		Spec: api.NodeSpec{Unschedulable: false},
+		Spec: v1.NodeSpec{Unschedulable: false},
-		Status: api.NodeStatus{
+		Status: v1.NodeStatus{
-			Capacity: api.ResourceList{
+			Capacity: v1.ResourceList{
-				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 			},
-			Conditions: []api.NodeCondition{goodCondition},
+			Conditions: []v1.NodeCondition{goodCondition},
 		},
 	}

@@ -281,10 +281,10 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface
 		}
 	}

-	pod := &api.Pod{
+	pod := &v1.Pod{
-		ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
+		ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
-			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
+			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
 		},
 	}

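DoTestPodScheduling's node fixture (32 pod slots, one Ready=True condition) reappears almost verbatim in the tests that follow; a hypothetical helper capturing the pattern:

    // makeReadyNode is an assumed helper, not in the commit: it builds the
    // schedulable-node fixture these integration tests keep repeating.
    func makeReadyNode(name string) *v1.Node {
        return &v1.Node{
            ObjectMeta: v1.ObjectMeta{Name: name},
            Spec:       v1.NodeSpec{Unschedulable: false},
            Status: v1.NodeStatus{
                Capacity: v1.ResourceList{
                    v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
                },
                Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
            },
        }
    }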
@@ -25,14 +25,14 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -43,7 +43,7 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )

-type nodeMutationFunc func(t *testing.T, n *api.Node, nodeStore cache.Store, c clientset.Interface)
+type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeStore cache.Store, c clientset.Interface)

 type nodeStateManager struct {
 	makeSchedulable nodeMutationFunc
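Each nodeStateManager entry pairs a mutation that should block scheduling with its inverse, so the test can drive the node through both states. Roughly, the table is consumed like this sketch (the driver name and loop body are assumptions):

    // runNodeModifications sketches the driver: after makeUnSchedulable a
    // fresh pod must stay pending; after makeSchedulable it must be placed.
    func runNodeModifications(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface, mods []nodeStateManager) {
        for _, mod := range mods {
            mod.makeUnSchedulable(t, n, s, c)
            // ... assert a newly created pod is NOT scheduled ...
            mod.makeSchedulable(t, n, s, c)
            // ... assert the same pod now gets scheduled ...
        }
    }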
@@ -57,16 +57,16 @@ func TestUnschedulableNodes(t *testing.T) {
 	ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)

-	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

-	schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
 	scheduler.New(schedulerConfig).Run()

 	defer close(schedulerConfig.StopEverything)
@@ -94,7 +94,7 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
 // Wait till the passFunc confirms that the object it expects to see is in the store.
 // Used to observe reflected events.
 func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n interface{}) bool) error {
-	nodes := []*api.Node{}
+	nodes := []*v1.Node{}
 	err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
 		if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
 			return true, nil
@@ -105,7 +105,7 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
 			if n == nil {
 				nodes = append(nodes, nil)
 			} else {
-				nodes = append(nodes, n.(*api.Node))
+				nodes = append(nodes, n.(*v1.Node))
 			}
 		}
 		return false, nil
@@ -120,33 +120,33 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
 	return err
 }
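A typical call site for waitForReflection: derive the cache key from the object, then poll until the cached copy satisfies the predicate. A sketch (the wrapper name is hypothetical):

    // waitUnschedulable blocks until the node cache reflects
    // Spec.Unschedulable == true for the given node.
    func waitUnschedulable(t *testing.T, nodeStore cache.Store, node *v1.Node) {
        nodeKey, err := cache.MetaNamespaceKeyFunc(node)
        if err != nil {
            t.Fatal(err)
        }
        if err := waitForReflection(t, nodeStore, nodeKey, func(n interface{}) bool {
            return n != nil && n.(*v1.Node).Spec.Unschedulable
        }); err != nil {
            t.Fatalf("node update was not reflected in the store: %v", err)
        }
    }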

-func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Namespace, nodeStore cache.Store) {
+func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
+	defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})

-	goodCondition := api.NodeCondition{
+	goodCondition := v1.NodeCondition{
-		Type:              api.NodeReady,
+		Type:              v1.NodeReady,
-		Status:            api.ConditionTrue,
+		Status:            v1.ConditionTrue,
 		Reason:            fmt.Sprintf("schedulable condition"),
 		LastHeartbeatTime: unversioned.Time{time.Now()},
 	}
-	badCondition := api.NodeCondition{
+	badCondition := v1.NodeCondition{
-		Type:              api.NodeReady,
+		Type:              v1.NodeReady,
-		Status:            api.ConditionUnknown,
+		Status:            v1.ConditionUnknown,
 		Reason:            fmt.Sprintf("unschedulable condition"),
 		LastHeartbeatTime: unversioned.Time{time.Now()},
 	}
 	// Create a new schedulable node, since we're first going to apply
 	// the unschedulable condition and verify that pods aren't scheduled.
-	node := &api.Node{
+	node := &v1.Node{
-		ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-node"},
+		ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-node"},
-		Spec: api.NodeSpec{Unschedulable: false},
+		Spec: v1.NodeSpec{Unschedulable: false},
-		Status: api.NodeStatus{
+		Status: v1.NodeStatus{
-			Capacity: api.ResourceList{
+			Capacity: v1.ResourceList{
-				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 			},
-			Conditions: []api.NodeCondition{goodCondition},
+			Conditions: []v1.NodeCondition{goodCondition},
 		},
 	}
 	nodeKey, err := cache.MetaNamespaceKeyFunc(node)
@@ -166,7 +166,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
 	nodeModifications := []nodeStateManager{
 		// Test node.Spec.Unschedulable=true/false
 		{
-			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
+			makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
 				n.Spec.Unschedulable = true
 				if _, err := c.Core().Nodes().Update(n); err != nil {
 					t.Fatalf("Failed to update node with unschedulable=true: %v", err)
@@ -176,19 +176,19 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
 					// Nodes that are unschedulable or that are not ready or
 					// have their disk full (Node.Spec.Conditions) are exluded
 					// based on NodeConditionPredicate, a separate check
-					return node != nil && node.(*api.Node).Spec.Unschedulable == true
+					return node != nil && node.(*v1.Node).Spec.Unschedulable == true
 				})
 				if err != nil {
 					t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
 				}
 			},
-			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
+			makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
 				n.Spec.Unschedulable = false
 				if _, err := c.Core().Nodes().Update(n); err != nil {
 					t.Fatalf("Failed to update node with unschedulable=false: %v", err)
 				}
 				err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
-					return node != nil && node.(*api.Node).Spec.Unschedulable == false
+					return node != nil && node.(*v1.Node).Spec.Unschedulable == false
 				})
 				if err != nil {
 					t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
@@ -197,35 +197,35 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
 			},
 		// Test node.Status.Conditions=ConditionTrue/Unknown
 		{
-			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
+			makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
-				n.Status = api.NodeStatus{
+				n.Status = v1.NodeStatus{
-					Capacity: api.ResourceList{
+					Capacity: v1.ResourceList{
-						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 					},
-					Conditions: []api.NodeCondition{badCondition},
+					Conditions: []v1.NodeCondition{badCondition},
 				}
 				if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
 					t.Fatalf("Failed to update node with bad status condition: %v", err)
 				}
 				err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
-					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionUnknown
+					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionUnknown
 				})
 				if err != nil {
 					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
 				}
 			},
-			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c clientset.Interface) {
+			makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
-				n.Status = api.NodeStatus{
+				n.Status = v1.NodeStatus{
-					Capacity: api.ResourceList{
+					Capacity: v1.ResourceList{
-						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 					},
-					Conditions: []api.NodeCondition{goodCondition},
+					Conditions: []v1.NodeCondition{goodCondition},
 				}
 				if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
 					t.Fatalf("Failed to update node with healthy status condition: %v", err)
 				}
 				err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
-					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionTrue
+					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionTrue
 				})
 				if err != nil {
 					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
@@ -245,10 +245,10 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name

 	// Create the new pod, note that this needs to happen post unschedulable
 	// modification or we have a race in the test.
-	pod := &api.Pod{
+	pod := &v1.Pod{
-		ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
+		ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-pod"},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
-			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
+			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
 		},
 	}
 	myPod, err := cs.Core().Pods(ns.Name).Create(pod)
@@ -282,7 +282,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *api.Name
 		t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
 	}

-	err = cs.Core().Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0))
+	err = cs.Core().Pods(ns.Name).Delete(myPod.Name, v1.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}
@@ -322,30 +322,30 @@ func TestMultiScheduler(t *testing.T) {
 		- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled
 	*/
 	// 1. create and start default-scheduler
-	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})

-	schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
 	scheduler.New(schedulerConfig).Run()
 	// default-scheduler will be stopped later

 	// 2. create a node
-	node := &api.Node{
+	node := &v1.Node{
-		ObjectMeta: api.ObjectMeta{Name: "node-multi-scheduler-test-node"},
+		ObjectMeta: v1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
-		Spec: api.NodeSpec{Unschedulable: false},
+		Spec: v1.NodeSpec{Unschedulable: false},
-		Status: api.NodeStatus{
+		Status: v1.NodeStatus{
-			Capacity: api.ResourceList{
+			Capacity: v1.ResourceList{
-				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 			},
 		},
 	}
@@ -397,16 +397,16 @@ func TestMultiScheduler(t *testing.T) {
 	}

 	// 5. create and start a scheduler with name "foo-scheduler"
-	clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

-	schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig2, err := schedulerConfigFactory2.Create()
 	if err != nil {
 		t.Errorf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster2 := record.NewBroadcaster()
-	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"})
+	schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(v1.EventSource{Component: "foo-scheduler"})
-	eventBroadcaster2.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
+	eventBroadcaster2.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
 	scheduler.New(schedulerConfig2).Run()

 	defer close(schedulerConfig2.StopEverything)
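Pods opt into the second scheduler through a pod annotation. In the 1.5 codebase that key was, to the best of recollection, "scheduler.alpha.kubernetes.io/name"; treat the key as an assumption, since the diff elides the pod-creation steps:

    // Hedged sketch: steer a pod to the "foo-scheduler" instance started
    // above, using the createPod helper defined later in this file.
    annotation := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
    testPod := createPod(clientSet2, "pod-for-foo-scheduler", annotation)
    if _, err := clientSet2.Core().Pods(ns.Name).Create(testPod); err != nil {
        t.Fatalf("Failed to create pod: %v", err)
    }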
@@ -421,11 +421,11 @@ func TestMultiScheduler(t *testing.T) {
 	}

 	// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
-	err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
+	err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, v1.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}
-	err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
+	err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, v1.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}
@@ -469,11 +469,11 @@ func TestMultiScheduler(t *testing.T) {
 	*/
 }

-func createPod(client clientset.Interface, name string, annotation map[string]string) *api.Pod {
+func createPod(client clientset.Interface, name string, annotation map[string]string) *v1.Pod {
-	return &api.Pod{
+	return &v1.Pod{
-		ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
+		ObjectMeta: v1.ObjectMeta{Name: name, Annotations: annotation},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
-			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
+			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
 		},
 	}
 }
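createPod only constructs the object; callers submit it themselves. With a nil annotation map the pod falls to the default scheduler, e.g.:

    // Illustrative call: build, then POST; the scheduler under test takes
    // over once the pod object exists in the API server.
    pod := createPod(clientSet, "no-annotation", nil)
    if _, err := clientSet.Core().Pods(ns.Name).Create(pod); err != nil {
        t.Fatalf("Failed to create pod: %v", err)
    }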
@@ -487,33 +487,33 @@ func TestAllocatable(t *testing.T) {
 	defer framework.DeleteTestingNamespace(ns, s, t)

 	// 1. create and start default-scheduler
-	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer clientSet.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})

-	schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
 	scheduler.New(schedulerConfig).Run()
 	// default-scheduler will be stopped later
 	defer close(schedulerConfig.StopEverything)

 	// 2. create a node without allocatable awareness
-	node := &api.Node{
+	node := &v1.Node{
-		ObjectMeta: api.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
+		ObjectMeta: v1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
-		Spec: api.NodeSpec{Unschedulable: false},
+		Spec: v1.NodeSpec{Unschedulable: false},
-		Status: api.NodeStatus{
+		Status: v1.NodeStatus{
-			Capacity: api.ResourceList{
+			Capacity: v1.ResourceList{
-				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-				api.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
+				v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
-				api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+				v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
 			},
 		},
 	}
@@ -524,17 +524,17 @@ func TestAllocatable(t *testing.T) {
 	}

 	// 3. create resource pod which requires less than Capacity
-	podResource := &api.Pod{
+	podResource := &v1.Pod{
-		ObjectMeta: api.ObjectMeta{Name: "pod-test-allocatable"},
+		ObjectMeta: v1.ObjectMeta{Name: "pod-test-allocatable"},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
-			Containers: []api.Container{
+			Containers: []v1.Container{
 				{
 					Name: "container",
 					Image: e2e.GetPauseImageName(clientSet),
-					Resources: api.ResourceRequirements{
+					Resources: v1.ResourceRequirements{
-						Requests: api.ResourceList{
+						Requests: v1.ResourceList{
-							api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
+							v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
-							api.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
+							v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
 						},
 					},
 				},
@@ -556,16 +556,16 @@ func TestAllocatable(t *testing.T) {
 	}

 	// 5. Change the node status to allocatable aware, note that Allocatable is less than Pod's requirement
-	allocNode.Status = api.NodeStatus{
+	allocNode.Status = v1.NodeStatus{
-		Capacity: api.ResourceList{
+		Capacity: v1.ResourceList{
-			api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-			api.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
+			v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
-			api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+			v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
 		},
-		Allocatable: api.ResourceList{
+		Allocatable: v1.ResourceList{
-			api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-			api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
+			v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
-			api.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
+			v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
 		},
 	}

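The point of the status change above: once a node reports Allocatable, the scheduler must fit requests against it rather than against Capacity. Reduced to one dimension, the rule the test exercises is roughly (an illustrative sketch, CPU only):

    // fitsCPU is an illustrative reduction of the fit rule TestAllocatable
    // relies on: 20m requested vs 10m allocatable must fail, even though
    // capacity (30m) would still accommodate the pod.
    func fitsCPU(requested, allocatable resource.Quantity) bool {
        return requested.Cmp(allocatable) <= 0
    }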
@@ -573,7 +573,7 @@ func TestAllocatable(t *testing.T) {
 		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
 	}

-	if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
+	if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &v1.DeleteOptions{}); err != nil {
 		t.Fatalf("Failed to remove first resource pod: %v", err)
 	}

@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -96,10 +96,10 @@ func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
 	podCreatorConfig := testutils.NewTestPodCreatorConfig()
 	for i := 0; i < numGroups; i++ {
 		podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
-			testutils.NewCustomCreatePodStrategy(&api.Pod{
+			testutils.NewCustomCreatePodStrategy(&v1.Pod{
-				ObjectMeta: api.ObjectMeta{
+				ObjectMeta: v1.ObjectMeta{
 					GenerateName: "sched-perf-node-affinity-pod-",
-					Annotations: map[string]string{api.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
+					Annotations: map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
 				},
 				Spec: testutils.MakePodSpec(),
 			}),
@@ -21,10 +21,10 @@ import (
 	"net/http/httptest"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
@@ -52,20 +52,20 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro

 	clientSet := clientset.NewForConfigOrDie(&restclient.Config{
 		Host: s.URL,
-		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
+		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
 		QPS: 5000.0,
 		Burst: 5000,
 	})

-	schedulerConfigFactory = factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)

 	schedulerConfig, err := schedulerConfigFactory.Create()
 	if err != nil {
 		panic("Couldn't create scheduler config")
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
 	scheduler.New(schedulerConfig).Run()

 	destroyFunc = func() {
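mustSetupScheduler raises QPS and Burst well above the client defaults so the scheduler-perf benchmark measures the scheduler, not the client-side rate limiter. The config in isolation (values copied from the diff; s.URL refers to the test server in the surrounding function):

    // Benchmark client config: high QPS/Burst keep the perf test from being
    // throttled by the default client-side rate limiter.
    cfg := &restclient.Config{
        Host:          s.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
        QPS:           5000.0,
        Burst:         5000,
    }
    clientSet := clientset.NewForConfigOrDie(cfg)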
@@ -23,9 +23,9 @@ package secrets
 import (
 	"testing"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -42,7 +42,7 @@ func TestSecrets(t *testing.T) {
 	_, s := framework.RunAMaster(nil)
 	defer s.Close()

-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 	ns := framework.CreateTestingNamespace("secret", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
@@ -51,10 +51,10 @@ func TestSecrets(t *testing.T) {
 }

 // DoTestSecrets test secrets for one api version.
-func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace) {
+func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 	// Make a secret object.
-	s := api.Secret{
+	s := v1.Secret{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "secret",
 			Namespace: ns.Name,
 		},
@@ -69,27 +69,27 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *api.Namespace)
 	defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)

 	// Template for pods that use a secret.
-	pod := &api.Pod{
+	pod := &v1.Pod{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "XXX",
 			Namespace: ns.Name,
 		},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
-			Volumes: []api.Volume{
+			Volumes: []v1.Volume{
 				{
 					Name: "secvol",
-					VolumeSource: api.VolumeSource{
+					VolumeSource: v1.VolumeSource{
-						Secret: &api.SecretVolumeSource{
+						Secret: &v1.SecretVolumeSource{
 							SecretName: "secret",
 						},
 					},
 				},
 			},
-			Containers: []api.Container{
+			Containers: []v1.Container{
 				{
 					Name: "fake-name",
 					Image: "fakeimage",
-					VolumeMounts: []api.VolumeMount{
+					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: "secvol",
 							MountPath: "/fake/path",
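The template above wires the secret in through a volume plus a mount; each key of the secret surfaces as a file under /fake/path in the container. The create sequence the test then performs looks roughly like this sketch (names taken from the surrounding function; the pod name is illustrative):

    // Create the secret first, then a pod from the template that mounts it.
    if _, err := client.Core().Secrets(ns.Name).Create(&s); err != nil {
        t.Fatalf("unable to create test secret: %v", err)
    }
    pod.ObjectMeta.Name = "uses-secret" // the template ships with a placeholder name
    if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
        t.Fatalf("Failed to create pod: %v", err)
    }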
|
@@ -31,14 +31,15 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/errors"
|
"k8s.io/kubernetes/pkg/api/errors"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||||
"k8s.io/kubernetes/pkg/auth/authenticator"
|
"k8s.io/kubernetes/pkg/auth/authenticator"
|
||||||
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
|
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
|
||||||
"k8s.io/kubernetes/pkg/auth/authorizer"
|
"k8s.io/kubernetes/pkg/auth/authorizer"
|
||||||
"k8s.io/kubernetes/pkg/auth/user"
|
"k8s.io/kubernetes/pkg/auth/user"
|
||||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||||
|
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||||
"k8s.io/kubernetes/pkg/client/restclient"
|
"k8s.io/kubernetes/pkg/client/restclient"
|
||||||
"k8s.io/kubernetes/pkg/controller"
|
"k8s.io/kubernetes/pkg/controller"
|
||||||
"k8s.io/kubernetes/pkg/controller/informers"
|
"k8s.io/kubernetes/pkg/controller/informers"
|
||||||
@@ -71,7 +72,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
|
|||||||
ns := "test-service-account-creation"
|
ns := "test-service-account-creation"
|
||||||
|
|
||||||
// Create namespace
|
// Create namespace
|
||||||
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("could not create namespace: %v", err)
|
t.Fatalf("could not create namespace: %v", err)
|
||||||
}
|
}
|
||||||
@@ -106,13 +107,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||||||
name := "my-service-account"
|
name := "my-service-account"
|
||||||
|
|
||||||
// Create namespace
|
// Create namespace
|
||||||
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("could not create namespace: %v", err)
|
t.Fatalf("could not create namespace: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create service account
|
// Create service account
|
||||||
serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: name}})
|
serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: name}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Service Account not created: %v", err)
|
t.Fatalf("Service Account not created: %v", err)
|
||||||
}
|
}
|
||||||
@@ -146,7 +147,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
serviceAccount.Secrets = []api.ObjectReference{}
|
serviceAccount.Secrets = []v1.ObjectReference{}
|
||||||
_, err = c.Core().ServiceAccounts(ns).Update(serviceAccount)
|
_, err = c.Core().ServiceAccounts(ns).Update(serviceAccount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -174,7 +175,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
|||||||
tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
|
tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
|
||||||
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
|
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
|
||||||
// Get all secrets in the namespace
|
// Get all secrets in the namespace
|
||||||
secrets, err := c.Core().Secrets(ns).List(api.ListOptions{})
|
secrets, err := c.Core().Secrets(ns).List(v1.ListOptions{})
|
||||||
// Retrieval errors should fail
|
// Retrieval errors should fail
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -200,7 +201,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||||||
ns := "auto-mount-ns"
|
ns := "auto-mount-ns"
|
||||||
|
|
||||||
// Create "my" namespace
|
// Create "my" namespace
|
||||||
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: ns}})
|
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||||
if err != nil && !errors.IsAlreadyExists(err) {
|
if err != nil && !errors.IsAlreadyExists(err) {
|
||||||
t.Fatalf("could not create namespace: %v", err)
|
t.Fatalf("could not create namespace: %v", err)
|
||||||
}
|
}
|
||||||
@@ -212,10 +213,10 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Pod to create
|
// Pod to create
|
||||||
protoPod := api.Pod{
|
protoPod := v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "protopod"},
|
ObjectMeta: v1.ObjectMeta{Name: "protopod"},
|
||||||
Spec: api.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Name: "container-1",
|
Name: "container-1",
|
||||||
Image: "container-1-image",
|
Image: "container-1-image",
|
||||||
@@ -223,15 +224,15 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||||||
{
|
{
|
||||||
Name: "container-2",
|
Name: "container-2",
|
||||||
Image: "container-2-image",
|
Image: "container-2-image",
|
||||||
VolumeMounts: []api.VolumeMount{
|
VolumeMounts: []v1.VolumeMount{
|
||||||
{Name: "empty-dir", MountPath: serviceaccountadmission.DefaultAPITokenMountPath},
|
{Name: "empty-dir", MountPath: serviceaccountadmission.DefaultAPITokenMountPath},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Volumes: []api.Volume{
|
Volumes: []v1.Volume{
|
||||||
{
|
{
|
||||||
Name: "empty-dir",
|
Name: "empty-dir",
|
||||||
VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}},
|
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -240,16 +241,16 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||||||
// Pod we expect to get created
|
// Pod we expect to get created
|
||||||
defaultMode := int32(0644)
|
defaultMode := int32(0644)
|
||||||
expectedServiceAccount := serviceaccountadmission.DefaultServiceAccountName
|
expectedServiceAccount := serviceaccountadmission.DefaultServiceAccountName
|
||||||
expectedVolumes := append(protoPod.Spec.Volumes, api.Volume{
|
expectedVolumes := append(protoPod.Spec.Volumes, v1.Volume{
|
||||||
Name: defaultTokenName,
|
Name: defaultTokenName,
|
||||||
VolumeSource: api.VolumeSource{
|
VolumeSource: v1.VolumeSource{
|
||||||
Secret: &api.SecretVolumeSource{
|
Secret: &v1.SecretVolumeSource{
|
||||||
SecretName: defaultTokenName,
|
SecretName: defaultTokenName,
|
||||||
DefaultMode: &defaultMode,
|
DefaultMode: &defaultMode,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
expectedContainer1VolumeMounts := []api.VolumeMount{
|
expectedContainer1VolumeMounts := []v1.VolumeMount{
|
||||||
{Name: defaultTokenName, MountPath: serviceaccountadmission.DefaultAPITokenMountPath, ReadOnly: true},
|
{Name: defaultTokenName, MountPath: serviceaccountadmission.DefaultAPITokenMountPath, ReadOnly: true},
|
||||||
}
|
}
|
||||||
expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts
|
expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts
|
||||||
@@ -261,13 +262,13 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
|
|||||||
if createdPod.Spec.ServiceAccountName != expectedServiceAccount {
|
if createdPod.Spec.ServiceAccountName != expectedServiceAccount {
|
||||||
t.Fatalf("Expected %s, got %s", expectedServiceAccount, createdPod.Spec.ServiceAccountName)
|
t.Fatalf("Expected %s, got %s", expectedServiceAccount, createdPod.Spec.ServiceAccountName)
|
||||||
}
|
}
|
||||||
if !api.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
|
if !v1.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
|
||||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedVolumes, createdPod.Spec.Volumes)
|
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedVolumes, createdPod.Spec.Volumes)
|
||||||
}
|
}
|
||||||
if !api.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
|
if !v1.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
|
||||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer1VolumeMounts, createdPod.Spec.Containers[0].VolumeMounts)
|
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer1VolumeMounts, createdPod.Spec.Containers[0].VolumeMounts)
|
||||||
}
|
}
|
||||||
if !api.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
|
if !v1.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
|
||||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer2VolumeMounts, createdPod.Spec.Containers[1].VolumeMounts)
|
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer2VolumeMounts, createdPod.Spec.Containers[1].VolumeMounts)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -280,19 +281,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||||||
otherns := "other-ns"
|
otherns := "other-ns"
|
||||||
|
|
||||||
// Create "my" namespace
|
// Create "my" namespace
|
||||||
_, err := c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: myns}})
|
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: myns}})
|
||||||
if err != nil && !errors.IsAlreadyExists(err) {
|
if err != nil && !errors.IsAlreadyExists(err) {
|
||||||
t.Fatalf("could not create namespace: %v", err)
|
t.Fatalf("could not create namespace: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create "other" namespace
|
// Create "other" namespace
|
||||||
_, err = c.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: otherns}})
|
_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: otherns}})
|
||||||
if err != nil && !errors.IsAlreadyExists(err) {
|
if err != nil && !errors.IsAlreadyExists(err) {
|
||||||
t.Fatalf("could not create namespace: %v", err)
|
t.Fatalf("could not create namespace: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create "ro" user in myns
|
// Create "ro" user in myns
|
||||||
_, err = c.Core().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readOnlyServiceAccountName}})
|
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readOnlyServiceAccountName}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Service Account not created: %v", err)
|
t.Fatalf("Service Account not created: %v", err)
|
||||||
}
|
}
|
||||||
@@ -312,7 +313,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
|
|||||||
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
|
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
|
||||||
|
|
||||||
// Create "rw" user in myns
|
// Create "rw" user in myns
|
||||||
_, err = c.Core().ServiceAccounts(myns).Create(&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: readWriteServiceAccountName}})
|
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readWriteServiceAccountName}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Service Account not created: %v", err)
|
t.Fatalf("Service Account not created: %v", err)
|
||||||
}
|
}
|
||||||
@@ -348,10 +349,11 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
|
|||||||
}))
|
}))
|
||||||
|
|
||||||
// Anonymous client config
|
// Anonymous client config
|
||||||
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}
|
clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}
|
||||||
// Root client
|
// Root client
|
||||||
// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
|
// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
|
||||||
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}, BearerToken: rootToken})
|
rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
|
||||||
|
internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
|
||||||
// Set up two authenticators:
|
// Set up two authenticators:
|
||||||
// 1. A token authenticator that maps the rootToken to the "root" user
|
// 1. A token authenticator that maps the rootToken to the "root" user
|
||||||
// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
|
// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
|
||||||
@@ -405,7 +407,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
|
|||||||
})
|
})
|
||||||
|
|
||||||
// Set up admission plugin to auto-assign serviceaccounts to pods
|
// Set up admission plugin to auto-assign serviceaccounts to pods
|
||||||
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClientset)
|
serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(internalRootClientset)
|
||||||
|
|
||||||
masterConfig := framework.NewMasterConfig()
|
masterConfig := framework.NewMasterConfig()
|
||||||
masterConfig.GenericConfig.EnableIndex = true
|
masterConfig.GenericConfig.EnableIndex = true
|
||||||
@@ -419,7 +421,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
|
|||||||
tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
|
tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
|
||||||
go tokenController.Run(1, stopCh)
|
go tokenController.Run(1, stopCh)
|
||||||
|
|
||||||
informers := informers.NewSharedInformerFactory(rootClientset, controller.NoResyncPeriodFunc())
|
informers := informers.NewSharedInformerFactory(rootClientset, nil, controller.NoResyncPeriodFunc())
|
||||||
serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
|
serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
|
||||||
informers.Start(stopCh)
|
informers.Start(stopCh)
|
||||||
go serviceAccountController.Run(5, stopCh)
|
go serviceAccountController.Run(5, stopCh)
|
||||||
@@ -435,12 +437,12 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
 	return rootClientset, clientConfig, stop
 }
 
-func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*api.ServiceAccount, error) {
+func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
 	if !shouldWait {
 		return c.Core().ServiceAccounts(ns).Get(name)
 	}
 
-	var user *api.ServiceAccount
+	var user *v1.ServiceAccount
 	var err error
 	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
 		user, err = c.Core().ServiceAccounts(ns).Get(name)
@@ -476,12 +478,12 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 		if err != nil {
 			return false, err
 		}
-		if secret.Type != api.SecretTypeServiceAccountToken {
+		if secret.Type != v1.SecretTypeServiceAccountToken {
 			continue
 		}
-		name := secret.Annotations[api.ServiceAccountNameKey]
-		uid := secret.Annotations[api.ServiceAccountUIDKey]
-		tokenData := secret.Data[api.ServiceAccountTokenKey]
+		name := secret.Annotations[v1.ServiceAccountNameKey]
+		uid := secret.Annotations[v1.ServiceAccountUIDKey]
+		tokenData := secret.Data[v1.ServiceAccountTokenKey]
 		if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
 			tokenName = secret.Name
 			token = string(tokenData)
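The loop above identifies a token secret by its type, its owner annotations, and a non-empty token payload. For reference, a sketch of the shape such a secret takes; the string literals are the well-known values behind the v1.ServiceAccountNameKey, v1.ServiceAccountUIDKey, and v1.ServiceAccountTokenKey constants used here:

	// Illustrative shape of a service-account token secret, using plain
	// maps rather than the API types.
	package main

	import "fmt"

	func main() {
		annotations := map[string]string{
			"kubernetes.io/service-account.name": "default",   // owner's name
			"kubernetes.io/service-account.uid":  "1234-uid",  // owner's UID
		}
		data := map[string][]byte{
			"token": []byte("<jwt signed with serviceAccountKey>"),
		}
		fmt.Println(annotations, len(data["token"]) > 0)
	}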
@@ -512,18 +514,18 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 type testOperation func() error
 
 func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
-	testSecret := &api.Secret{
-		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
+	testSecret := &v1.Secret{
+		ObjectMeta: v1.ObjectMeta{Name: "testSecret"},
 		Data:       map[string][]byte{"test": []byte("data")},
 	}
 
 	readOps := []testOperation{
 		func() error {
-			_, err := c.Core().Secrets(ns).List(api.ListOptions{})
+			_, err := c.Core().Secrets(ns).List(v1.ListOptions{})
 			return err
 		},
 		func() error {
-			_, err := c.Core().Pods(ns).List(api.ListOptions{})
+			_, err := c.Core().Pods(ns).List(v1.ListOptions{})
 			return err
 		},
 	}
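These testOperation slices are driven against the caller's read/write expectations. A standalone sketch of such a driver; the real test's assertion helper may differ in name and shape:

	// Hypothetical driver for a testOperation table; illustrative only.
	package main

	import "fmt"

	type testOperation func() error

	// runOps executes each operation and checks the result against
	// whether the caller should be authorized.
	func runOps(allowed bool, ops []testOperation) {
		for i, op := range ops {
			err := op()
			switch {
			case allowed && err != nil:
				fmt.Printf("op %d: expected success, got %v\n", i, err)
			case !allowed && err == nil:
				fmt.Printf("op %d: expected a forbidden error, got none\n", i)
			}
		}
	}

	func main() {
		ok := testOperation(func() error { return nil })
		runOps(true, []testOperation{ok})
	}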
@@ -23,13 +23,13 @@ package storageclasses
 import (
 	"testing"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	"k8s.io/kubernetes/pkg/apis/storage"
-	storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
+	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -41,7 +41,7 @@ func TestStorageClasses(t *testing.T) {
 	_, s := framework.RunAMaster(nil)
 	defer s.Close()
 
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 
 	ns := framework.CreateTestingNamespace("storageclass", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
@@ -50,13 +50,13 @@ func TestStorageClasses(t *testing.T) {
 }
 
 // DoTestStorageClasses tests storage classes for one api version.
-func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Namespace) {
+func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 	// Make a storage class object.
 	s := storage.StorageClass{
 		TypeMeta: unversioned.TypeMeta{
 			Kind: "StorageClass",
 		},
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "gold",
 		},
 		Provisioner: provisionerPluginName,
@@ -68,17 +68,17 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *api.Name
 	defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)
 
 	// Template for pvcs that specify a storage class
-	pvc := &api.PersistentVolumeClaim{
-		ObjectMeta: api.ObjectMeta{
+	pvc := &v1.PersistentVolumeClaim{
+		ObjectMeta: v1.ObjectMeta{
 			Name:      "XXX",
 			Namespace: ns.Name,
 			Annotations: map[string]string{
 				storageutil.StorageClassAnnotation: "gold",
 			},
 		},
-		Spec: api.PersistentVolumeClaimSpec{
-			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
-			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+		Spec: v1.PersistentVolumeClaimSpec{
+			Resources:   v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
+			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
 		},
 	}
 
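The claim opts into the "gold" class through storageutil.StorageClassAnnotation, whose well-known key in this release is assumed to be "volume.beta.kubernetes.io/storage-class". A small standalone sketch of class selection by that annotation:

	// Sketch of storage-class selection by annotation, assuming the beta
	// annotation key used in this release.
	package main

	import "fmt"

	func classForClaim(annotations map[string]string) (string, bool) {
		c, ok := annotations["volume.beta.kubernetes.io/storage-class"]
		return c, ok
	}

	func main() {
		ann := map[string]string{"volume.beta.kubernetes.io/storage-class": "gold"}
		fmt.Println(classForClaim(ann)) // gold true
	}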
test/integration/thirdparty/thirdparty_test.go (27 lines changed, vendored)
@@ -29,8 +29,9 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/util/diff"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -43,7 +44,7 @@ func TestThirdPartyDelete(t *testing.T) {
 	defer s.Close()
 
 	clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
-	client := internalclientset.NewForConfigOrDie(clientConfig)
+	client := clientset.NewForConfigOrDie(clientConfig)
 
 	DoTestInstallThirdPartyAPIDelete(t, client, clientConfig)
 }
@@ -53,7 +54,7 @@ func TestThirdPartyMultiple(t *testing.T) {
 	defer s.Close()
 
 	clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
-	client := internalclientset.NewForConfigOrDie(clientConfig)
+	client := clientset.NewForConfigOrDie(clientConfig)
 
 	DoTestInstallMultipleAPIs(t, client, clientConfig)
 }
@@ -63,7 +64,7 @@ var versionsToTest = []string{"v1"}
 
 type Foo struct {
 	unversioned.TypeMeta `json:",inline"`
-	api.ObjectMeta       `json:"metadata,omitempty" description:"standard object metadata"`
+	v1.ObjectMeta        `json:"metadata,omitempty" description:"standard object metadata"`
 
 	SomeField  string `json:"someField"`
 	OtherField int    `json:"otherField"`
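Foo instances round-trip through JSON on the ThirdPartyResource endpoints. A standalone sketch of the wire form, with a simplified metadata struct in place of the embedded v1.ObjectMeta:

	// Standalone sketch of Foo's JSON wire form; metadata is simplified
	// to a local struct instead of the embedded v1.ObjectMeta.
	package main

	import (
		"encoding/json"
		"fmt"
	)

	type foo struct {
		Kind       string `json:"kind"`
		APIVersion string `json:"apiVersion"`
		Metadata   struct {
			Name      string `json:"name"`
			Namespace string `json:"namespace"`
		} `json:"metadata"`
		SomeField  string `json:"someField"`
		OtherField int    `json:"otherField"`
	}

	func main() {
		f := foo{Kind: "Foo", APIVersion: "company.com/v1", SomeField: "x", OtherField: 1}
		f.Metadata.Name, f.Metadata.Namespace = "test", "default"
		b, _ := json.Marshal(f)
		fmt.Println(string(b))
	}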
@@ -77,7 +78,7 @@ type FooList struct {
 }
 
 // installThirdParty installs a third party resource and returns a defer func
-func installThirdParty(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
+func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
 	var err error
 	_, err = client.Extensions().ThirdPartyResources().Create(tpr)
 	if err != nil {
@@ -123,13 +124,13 @@ func installThirdParty(t *testing.T, client internalclientC
 	}
 }
 
-func DoTestInstallMultipleAPIs(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config) {
+func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
 	group := "company.com"
 	version := "v1"
 
 	defer installThirdParty(t, client, clientConfig,
 		&extensions.ThirdPartyResource{
-			ObjectMeta: api.ObjectMeta{Name: "foo.company.com"},
+			ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
 			Versions:   []extensions.APIVersion{{Name: version}},
 		}, group, version, "foos",
 	)()
@@ -137,24 +138,24 @@ func DoTestInstallMultipleAPIs(t *testing.T, client internalclientset.Interface,
 	// TODO make multiple resources in one version work
 	// defer installThirdParty(t, client, clientConfig,
 	// 	&extensions.ThirdPartyResource{
-	// 		ObjectMeta: api.ObjectMeta{Name: "bar.company.com"},
+	// 		ObjectMeta: v1.ObjectMeta{Name: "bar.company.com"},
 	// 		Versions:   []extensions.APIVersion{{Name: version}},
 	// 	}, group, version, "bars",
 	// )()
 }
 
-func DoTestInstallThirdPartyAPIDelete(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config) {
+func DoTestInstallThirdPartyAPIDelete(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
 	for _, version := range versionsToTest {
 		testInstallThirdPartyAPIDeleteVersion(t, client, clientConfig, version)
 	}
 }
 
-func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client internalclientset.Interface, clientConfig *restclient.Config, version string) {
+func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, version string) {
 	group := "company.com"
 
 	defer installThirdParty(t, client, clientConfig,
 		&extensions.ThirdPartyResource{
-			ObjectMeta: api.ObjectMeta{Name: "foo.company.com"},
+			ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
 			Versions:   []extensions.APIVersion{{Name: version}},
 		}, group, version, "foos",
 	)()
@@ -168,7 +169,7 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client internalclientse
 	}
 
 	expectedObj := Foo{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name:      "test",
 			Namespace: "default",
 		},
@@ -26,7 +26,7 @@ import (
 	"github.com/golang/glog"
 	"golang.org/x/net/context"
 	"k8s.io/kubernetes/pkg/api/errors"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/integration/framework"