dependencies: pkg/volume
@@ -24,14 +24,15 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/watch"
 )
 
 type testcase struct {
 	// Input of the test
 	name        string
-	existingPod *api.Pod
-	createPod   *api.Pod
+	existingPod *v1.Pod
+	createPod   *v1.Pod
 	// eventSequence is list of events that are simulated during recycling. It
 	// can be either event generated by a recycler pod or a state change of
 	// the pod. (see newPodEvent and newEvent below).
@@ -44,7 +45,7 @@ type testcase struct {
 	expectedError string
 }
 
-func newPodEvent(eventtype watch.EventType, name string, phase api.PodPhase, message string) watch.Event {
+func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
 	return watch.Event{
 		Type:   eventtype,
 		Object: newPod(name, phase, message),
@@ -54,9 +55,9 @@ func newPodEvent(eventtype watch.EventType, name string, phase api.PodPhase, mes
 func newEvent(eventtype, message string) watch.Event {
 	return watch.Event{
 		Type: watch.Added,
-		Object: &api.Event{
-			ObjectMeta: api.ObjectMeta{
-				Namespace: api.NamespaceDefault,
+		Object: &v1.Event{
+			ObjectMeta: v1.ObjectMeta{
+				Namespace: v1.NamespaceDefault,
 			},
 			Reason:  "MockEvent",
 			Message: message,
@@ -65,13 +66,13 @@ func newEvent(eventtype, message string) watch.Event {
 	}
 }
 
-func newPod(name string, phase api.PodPhase, message string) *api.Pod {
-	return &api.Pod{
-		ObjectMeta: api.ObjectMeta{
-			Namespace: api.NamespaceDefault,
+func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: v1.NamespaceDefault,
 			Name:      name,
 		},
-		Status: api.PodStatus{
+		Status: v1.PodStatus{
 			Phase:   phase,
 			Message: message,
 		},
@@ -83,70 +84,70 @@ func TestRecyclerPod(t *testing.T) {
 		{
 			// Test recycler success with some events
 			name:      "RecyclerSuccess",
-			createPod: newPod("podRecyclerSuccess", api.PodPending, ""),
+			createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
 			eventSequence: []watch.Event{
 				// Pod gets Running and Succeeded
-				newPodEvent(watch.Added, "podRecyclerSuccess", api.PodPending, ""),
-				newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
-				newEvent(api.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
-				newEvent(api.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
-				newEvent(api.EventTypeNormal, "Created container with docker id 83d929aeac82"),
-				newEvent(api.EventTypeNormal, "Started container with docker id 83d929aeac82"),
-				newPodEvent(watch.Modified, "podRecyclerSuccess", api.PodRunning, ""),
-				newPodEvent(watch.Modified, "podRecyclerSuccess", api.PodSucceeded, ""),
+				newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
+				newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
+				newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
+				newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
+				newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
+				newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
+				newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
+				newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
 			},
 			expectedEvents: []mockEvent{
-				{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
-				{api.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
-				{api.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
-				{api.EventTypeNormal, "Created container with docker id 83d929aeac82"},
-				{api.EventTypeNormal, "Started container with docker id 83d929aeac82"},
+				{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
+				{v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
+				{v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
+				{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
+				{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
 			},
 			expectedError: "",
 		},
 		{
 			// Test recycler failure with some events
 			name:      "RecyclerFailure",
-			createPod: newPod("podRecyclerFailure", api.PodPending, ""),
+			createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
 			eventSequence: []watch.Event{
 				// Pod gets Running and Succeeded
-				newPodEvent(watch.Added, "podRecyclerFailure", api.PodPending, ""),
-				newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
-				newEvent(api.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
-				newEvent(api.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"),
-				newPodEvent(watch.Modified, "podRecyclerFailure", api.PodRunning, ""),
-				newPodEvent(watch.Modified, "podRecyclerFailure", api.PodFailed, "Pod was active on the node longer than specified deadline"),
+				newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
+				newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
+				newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
+				newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"),
+				newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
+				newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
 			},
 			expectedEvents: []mockEvent{
-				{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
-				{api.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
-				{api.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"},
+				{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
+				{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
+				{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"},
 			},
 			expectedError: "Pod was active on the node longer than specified deadline",
 		},
 		{
 			// Recycler pod gets deleted
 			name:      "RecyclerDeleted",
-			createPod: newPod("podRecyclerDeleted", api.PodPending, ""),
+			createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
 			eventSequence: []watch.Event{
 				// Pod gets Running and Succeeded
-				newPodEvent(watch.Added, "podRecyclerDeleted", api.PodPending, ""),
-				newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
-				newPodEvent(watch.Deleted, "podRecyclerDeleted", api.PodPending, ""),
+				newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
+				newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
+				newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
 			},
 			expectedEvents: []mockEvent{
-				{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
+				{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
 			},
 			expectedError: "recycler pod was deleted",
 		},
 		{
 			// Another recycler pod is already running
 			name:        "RecyclerRunning",
-			existingPod: newPod("podOldRecycler", api.PodRunning, ""),
-			createPod:   newPod("podNewRecycler", api.PodFailed, "mock message"),
+			existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
+			createPod:   newPod("podNewRecycler", v1.PodFailed, "mock message"),
 			eventSequence: []watch.Event{
 				// Old pod succeeds
-				newPodEvent(watch.Modified, "podOldRecycler", api.PodSucceeded, ""),
+				newPodEvent(watch.Modified, "podOldRecycler", v1.PodSucceeded, ""),
 			},
 			// No error = old pod succeeded. If the new pod was used, there
 			// would be error with "mock message".
@@ -155,11 +156,11 @@ func TestRecyclerPod(t *testing.T) {
 		{
 			// Another recycler pod is already running and fails
 			name:        "FailedRecyclerRunning",
-			existingPod: newPod("podOldRecycler", api.PodRunning, ""),
-			createPod:   newPod("podNewRecycler", api.PodFailed, "mock message"),
+			existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
+			createPod:   newPod("podNewRecycler", v1.PodFailed, "mock message"),
 			eventSequence: []watch.Event{
 				// Old pod failure
-				newPodEvent(watch.Modified, "podOldRecycler", api.PodFailed, "Pod was active on the node longer than specified deadline"),
+				newPodEvent(watch.Modified, "podOldRecycler", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
 			},
 			// If the new pod was used, there would be error with "mock message".
 			expectedError: "Pod was active on the node longer than specified deadline",
@@ -205,7 +206,7 @@ func TestRecyclerPod(t *testing.T) {
 }
 
 type mockRecyclerClient struct {
-	pod            *api.Pod
+	pod            *v1.Pod
 	deletedCalled  bool
 	receivedEvents []mockEvent
 	events         []watch.Event
@@ -215,7 +216,7 @@ type mockEvent struct {
 	eventtype, message string
 }
 
-func (c *mockRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
+func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
 	if c.pod == nil {
 		c.pod = pod
 		return c.pod, nil
@@ -224,7 +225,7 @@ func (c *mockRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
 	return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
 }
 
-func (c *mockRecyclerClient) GetPod(name, namespace string) (*api.Pod, error) {
+func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
 	if c.pod != nil {
 		return c.pod, nil
 	} else {
@@ -252,10 +253,10 @@ func (c *mockRecyclerClient) Event(eventtype, message string) {
 }
 
 func TestCalculateTimeoutForVolume(t *testing.T) {
-	pv := &api.PersistentVolume{
-		Spec: api.PersistentVolumeSpec{
-			Capacity: api.ResourceList{
-				api.ResourceName(api.ResourceStorage): resource.MustParse("500M"),
+	pv := &v1.PersistentVolume{
+		Spec: v1.PersistentVolumeSpec{
+			Capacity: v1.ResourceList{
+				v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
 			},
 		},
 	}
@@ -265,13 +266,13 @@ func TestCalculateTimeoutForVolume(t *testing.T) {
 		t.Errorf("Expected 50 for timeout but got %v", timeout)
 	}
 
-	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("2Gi")
+	pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
 	timeout = CalculateTimeoutForVolume(50, 30, pv)
 	if timeout != 60 {
 		t.Errorf("Expected 60 for timeout but got %v", timeout)
 	}
 
-	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("150Gi")
+	pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
 	timeout = CalculateTimeoutForVolume(50, 30, pv)
 	if timeout != 4500 {
 		t.Errorf("Expected 4500 for timeout but got %v", timeout)