Merge pull request #12961 from smarterclayton/prepare_for_graceful

Update tests to prepare for graceful deletion (3/7)
Wojciech Tyczynski
2015-08-20 10:04:32 +02:00
9 changed files with 47 additions and 40 deletions
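
Context for the changes below: with graceful deletion, a pod delete request no longer removes the pod immediately; the server records a deletionTimestamp and waits out a grace period. Tests that assume immediate removal therefore pin the grace period to zero via api.NewDeleteOptions(0). As a rough sketch of the shape of that helper (hedged reconstruction; the real one lives in the api package, and only the relevant field is shown):

```go
package main

import "fmt"

// DeleteOptions mirrors the shape of api.DeleteOptions for this sketch;
// only the field relevant to graceful deletion is included.
type DeleteOptions struct {
	GracePeriodSeconds *int64
}

// NewDeleteOptions corresponds to api.NewDeleteOptions: it pins the
// grace period to an explicit value instead of deferring to the
// server-side default.
func NewDeleteOptions(grace int64) *DeleteOptions {
	return &DeleteOptions{GracePeriodSeconds: &grace}
}

func main() {
	opts := NewDeleteOptions(0)
	fmt.Println(*opts.GracePeriodSeconds) // 0: delete immediately, no grace period
}
```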

View File

@@ -791,7 +791,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
if strings.Contains(data.Image, jpgExpected) {
return nil
} else {
-return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
+return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
}
}
}

View File

@@ -78,8 +78,8 @@ var _ = Describe("Pod Disks", func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
-podClient.Delete(host0Pod.Name, nil)
-podClient.Delete(host1Pod.Name, nil)
+podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
deletePD(diskName)
@@ -98,7 +98,7 @@ var _ = Describe("Pod Disks", func() {
Logf("Wrote value: %v", testFileContents)
By("deleting host0Pod")
-expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -113,7 +113,7 @@ var _ = Describe("Pod Disks", func() {
Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))
By("deleting host1Pod")
-expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
+expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
By(fmt.Sprintf("deleting PD %q", diskName))
deletePDWithRetry(diskName)
@@ -136,9 +136,9 @@ var _ = Describe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
-podClient.Delete(rwPod.Name, nil)
-podClient.Delete(host0ROPod.Name, nil)
-podClient.Delete(host1ROPod.Name, nil)
+podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
@@ -149,7 +149,7 @@ var _ = Describe("Pod Disks", func() {
_, err = podClient.Create(rwPod)
expectNoError(err, "Failed to create rwPod")
expectNoError(framework.WaitForPodRunning(rwPod.Name))
-expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
+expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
expectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -165,10 +165,10 @@ var _ = Describe("Pod Disks", func() {
expectNoError(framework.WaitForPodRunning(host1ROPod.Name))
By("deleting host0ROPod")
-expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
+expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod")
-expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
+expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
By(fmt.Sprintf("deleting PD %q", diskName))
deletePDWithRetry(diskName)
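
A note on why the PD tests in particular pin the grace period to zero: teardown detaches and deletes the persistent disk right after deleting the pods, presumably so those steps aren't racing a pod that is still terminating through a default grace period. Teardown also deliberately ignores delete errors, since the pods may already be gone when the test passed. A minimal sketch of that best-effort pattern, where cleanupPod is a hypothetical stand-in for podClient.Delete with api.NewDeleteOptions(0):

```go
package main

import "fmt"

// cleanupPod is a hypothetical stand-in for podClient.Delete with a
// zero grace period; teardown treats failures as non-fatal.
func cleanupPod(name string) error {
	fmt.Println("immediate delete:", name)
	return nil
}

func main() {
	// Best-effort teardown: errors are ignored because the pods may
	// already have been deleted by the test body.
	for _, name := range []string{"host0Pod", "host1Pod"} {
		_ = cleanupPod(name)
	}
}
```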

View File

@@ -43,7 +43,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
-c.Pods(ns).Delete(podDescr.Name, nil)
+c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
}()
// Wait until the pod is not pending. (Here we need to check for something other than
@@ -86,15 +86,14 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
podClient := c.Pods(ns)
By("creating pod")
-defer podClient.Delete(pod.Name, nil)
-_, err := podClient.Create(pod)
-if err != nil {
+defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod: %v", err)
}
By("ensuring that pod is running and has a hostIP")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
-err = waitForPodRunningInNamespace(c, pod.Name, ns)
+err := waitForPodRunningInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
@@ -222,7 +221,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-defer podClient.Delete(pod.Name, nil)
+defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
_, err = podClient.Create(pod)
if err != nil {
Failf("Failed to create pod: %v", err)
@@ -235,7 +234,7 @@ var _ = Describe("Pods", func() {
}
Expect(len(pods.Items)).To(Equal(1))
By("veryfying pod creation was observed")
By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
@@ -312,7 +311,7 @@ var _ = Describe("Pods", func() {
By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
-podClient.Delete(pod.Name, nil)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
pod, err := podClient.Create(pod)
if err != nil {
@@ -376,7 +375,7 @@ var _ = Describe("Pods", func() {
},
},
}
-defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, nil)
+defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
_, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod)
if err != nil {
Failf("Failed to create serverPod: %v", err)
@@ -600,7 +599,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-podClient.Delete(pod.Name)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
By("waiting for the pod to start running")
@@ -673,7 +672,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-podClient.Delete(pod.Name)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
By("waiting for the pod to start running")

View File

@@ -831,20 +831,24 @@ func expectNoError(err error, explain ...interface{}) {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
-// Stops everything from filePath from namespace ns and checks if everything maching selectors from the given namespace is correctly stopped.
+// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func cleanup(filePath string, ns string, selectors ...string) {
By("using stop to clean up resources")
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
runKubectl("stop", "-f", filePath, nsArg)
runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)
for _, selector := range selectors {
-resources := runKubectl("get", "pods,rc,svc", "-l", selector, "--no-headers", nsArg)
+resources := runKubectl("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
+pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+if pods != "" {
+Failf("Pods left unterminated after stop:\n%s", pods)
+}
}
}
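
The new check in cleanup accounts for graceful deletion directly: pods that are already terminating still show up in listings, but with a deletionTimestamp set, so the template only reports pods that have not been marked for deletion. The template itself is plain Go text/template syntax; here is a self-contained reproduction of the filter, with the pod list mocked as the decoded JSON maps kubectl would hand the template:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Same template the test passes to kubectl via -t: print only pods
	// whose metadata has no deletionTimestamp.
	const tmpl = `{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}`

	// Mocked pod list, shaped like kubectl's JSON view of the objects.
	list := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"metadata": map[string]interface{}{
				"name": "live-pod",
			}},
			map[string]interface{}{"metadata": map[string]interface{}{
				"name":              "terminating-pod",
				"deletionTimestamp": "2015-08-20T08:04:32Z",
			}},
		},
	}

	// Prints only "live-pod"; the terminating pod is filtered out.
	_ = template.Must(template.New("pods").Parse(tmpl)).Execute(os.Stdout, list)
}
```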