Revert "Revert "Gracefully delete pods from the Kubelet""

This reverts commit 98115facfd.
Clayton Coleman
2015-06-02 20:36:58 -04:00
parent 919c7e94e2
commit b842a7dd15
60 changed files with 841 additions and 246 deletions
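The diffs below switch pod cleanup in the e2e tests from the default graceful delete (a nil *api.DeleteOptions) to an immediate delete via api.NewDeleteOptions(0), and teach the watch-based pod test to assert that a gracefully deleted pod carries a DeletionTimestamp and a non-zero TerminationGracePeriodSeconds. The following is a hedged, self-contained sketch of that distinction only; DeleteOptions, NewDeleteOptions, and deletePod are simplified stand-ins, not the real Kubernetes api/client types.

// Illustrative sketch, not part of this commit.
package main

import "fmt"

// DeleteOptions mirrors the shape behind api.NewDeleteOptions(grace): an
// optional grace period in seconds.
type DeleteOptions struct {
	GracePeriodSeconds *int64
}

// NewDeleteOptions returns options carrying an explicit grace period.
func NewDeleteOptions(grace int64) *DeleteOptions {
	return &DeleteOptions{GracePeriodSeconds: &grace}
}

// deletePod stands in for podClient.Delete: nil options defer to the
// server-side default grace period, while an explicit zero deletes now.
func deletePod(name string, opts *DeleteOptions) {
	switch {
	case opts == nil || opts.GracePeriodSeconds == nil:
		fmt.Printf("delete %s gracefully (default grace period)\n", name)
	case *opts.GracePeriodSeconds == 0:
		fmt.Printf("delete %s immediately (grace period 0)\n", name)
	default:
		fmt.Printf("delete %s after a %d second grace period\n", name, *opts.GracePeriodSeconds)
	}
}

func main() {
	deletePod("host0Pod", nil)                 // old cleanup path: graceful delete
	deletePod("host0Pod", NewDeleteOptions(0)) // new cleanup path: delete immediately
}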

View File

@@ -791,7 +791,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
if strings.Contains(data.Image, jpgExpected) {
return nil
} else {
return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
}
}
}

View File

@@ -78,8 +78,8 @@ var _ = Describe("Pod Disks", func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
-podClient.Delete(host0Pod.Name, nil)
-podClient.Delete(host1Pod.Name, nil)
+podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
deletePD(diskName)
@@ -98,7 +98,7 @@ var _ = Describe("Pod Disks", func() {
Logf("Wrote value: %v", testFileContents)
By("deleting host0Pod")
-expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -113,7 +113,7 @@ var _ = Describe("Pod Disks", func() {
Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))
By("deleting host1Pod")
-expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
+expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
By(fmt.Sprintf("deleting PD %q", diskName))
deletePDWithRetry(diskName)
@@ -136,9 +136,9 @@ var _ = Describe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
-podClient.Delete(rwPod.Name, nil)
-podClient.Delete(host0ROPod.Name, nil)
-podClient.Delete(host1ROPod.Name, nil)
+podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
+podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
@@ -149,7 +149,7 @@ var _ = Describe("Pod Disks", func() {
_, err = podClient.Create(rwPod)
expectNoError(err, "Failed to create rwPod")
expectNoError(framework.WaitForPodRunning(rwPod.Name))
-expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
+expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
expectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -165,10 +165,10 @@ var _ = Describe("Pod Disks", func() {
expectNoError(framework.WaitForPodRunning(host1ROPod.Name))
By("deleting host0ROPod")
-expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
+expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod")
-expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
+expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
By(fmt.Sprintf("deleting PD %q", diskName))
deletePDWithRetry(diskName)

View File

@@ -43,7 +43,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
-c.Pods(ns).Delete(podDescr.Name, nil)
+c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
}()
// Wait until the pod is not pending. (Here we need to check for something other than
@@ -86,15 +86,14 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
podClient := c.Pods(ns)
By("creating pod")
-defer podClient.Delete(pod.Name, nil)
-_, err := podClient.Create(pod)
-if err != nil {
+defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod: %v", err)
}
By("ensuring that pod is running and has a hostIP")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
-err = waitForPodRunningInNamespace(c, pod.Name, ns)
+err := waitForPodRunningInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
@@ -222,7 +221,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-defer podClient.Delete(pod.Name, nil)
+defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
_, err = podClient.Create(pod)
if err != nil {
Failf("Failed to create pod: %v", err)
@@ -235,7 +234,7 @@ var _ = Describe("Pods", func() {
}
Expect(len(pods.Items)).To(Equal(1))
By("veryfying pod creation was observed")
By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
@@ -245,7 +244,7 @@ var _ = Describe("Pods", func() {
Fail("Timeout while waiting for pod creation")
}
By("deleting the pod")
By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, nil); err != nil {
Failf("Failed to delete pod: %v", err)
}
@@ -253,11 +252,13 @@ var _ = Describe("Pods", func() {
By("verifying pod deletion was observed")
deleted := false
timeout := false
+var lastPod *api.Pod
timer := time.After(podStartTimeout)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
+lastPod = event.Object.(*api.Pod)
deleted = true
}
case <-timer:
@@ -268,6 +269,9 @@ var _ = Describe("Pods", func() {
Fail("Failed to observe pod deletion")
}
+Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
+Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil {
Fail(fmt.Sprintf("Failed to list pods to verify deletion: %v", err))
@@ -312,7 +316,7 @@ var _ = Describe("Pods", func() {
By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
-podClient.Delete(pod.Name, nil)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
pod, err := podClient.Create(pod)
if err != nil {
@@ -376,7 +380,7 @@ var _ = Describe("Pods", func() {
},
},
}
-defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, nil)
+defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
_, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod)
if err != nil {
Failf("Failed to create serverPod: %v", err)
@@ -600,7 +604,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-podClient.Delete(pod.Name)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
By("waiting for the pod to start running")
@@ -673,7 +677,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
-podClient.Delete(pod.Name)
+podClient.Delete(pod.Name, api.NewDeleteOptions(0))
}()
By("waiting for the pod to start running")

View File

@@ -831,20 +831,24 @@ func expectNoError(err error, explain ...interface{}) {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
-// Stops everything from filePath from namespace ns and checks if everything maching selectors from the given namespace is correctly stopped.
+// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func cleanup(filePath string, ns string, selectors ...string) {
By("using stop to clean up resources")
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
runKubectl("stop", "-f", filePath, nsArg)
runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)
for _, selector := range selectors {
resources := runKubectl("get", "pods,rc,svc", "-l", selector, "--no-headers", nsArg)
resources := runKubectl("get", "rc,se", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}

View File

@@ -39,7 +39,9 @@ import (
"math/rand"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
"k8s.io/kubernetes/pkg/client"
@@ -47,10 +49,11 @@ import (
)
var (
port = flag.Int("port", 8080, "Port number to serve at.")
peerCount = flag.Int("peers", 8, "Must find at least this many peers for the test to pass.")
service = flag.String("service", "nettest", "Service to find other network test pods in.")
namespace = flag.String("namespace", "default", "Namespace of this pod. TODO: kubernetes should make this discoverable.")
port = flag.Int("port", 8080, "Port number to serve at.")
peerCount = flag.Int("peers", 8, "Must find at least this many peers for the test to pass.")
service = flag.String("service", "nettest", "Service to find other network test pods in.")
namespace = flag.String("namespace", "default", "Namespace of this pod. TODO: kubernetes should make this discoverable.")
delayShutdown = flag.Int("delay-shutdown", 0, "Number of seconds to delay shutdown when receiving SIGTERM.")
)
// State tracks the internal state of our little http server.
@@ -178,6 +181,17 @@ func main() {
log.Fatalf("Error getting hostname: %v", err)
}
+if *delayShutdown > 0 {
+termCh := make(chan os.Signal)
+signal.Notify(termCh, syscall.SIGTERM)
+go func() {
+<-termCh
+log.Printf("Sleeping %d seconds before exit ...", *delayShutdown)
+time.Sleep(time.Duration(*delayShutdown) * time.Second)
+os.Exit(0)
+}()
+}
state := State{
Hostname: hostname,
StillContactingPeers: true,

View File

@@ -232,7 +232,7 @@ var deleteNow string = `
{
"kind": "DeleteOptions",
"apiVersion": "` + testapi.Version() + `",
"gracePeriodSeconds": null%s
"gracePeriodSeconds": 0%s
}
`

View File

@@ -80,6 +80,58 @@ func TestGet(t *testing.T) {
})
}
+func TestWriteTTL(t *testing.T) {
+client := framework.NewEtcdClient()
+helper := tools.EtcdHelper{Client: client, Codec: stringCodec{}}
+framework.WithEtcdKey(func(key string) {
+_, err := client.Set(key, "object", 0)
+if err != nil {
+t.Fatalf("unexpected error: %v", err)
+}
+s := fakeAPIObject("")
+err = helper.GuaranteedUpdate(key, &s, false, func(obj runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) {
+if *(obj.(*fakeAPIObject)) != "object" {
+t.Fatalf("unexpected existing object: %v", obj)
+}
+if res.TTL != 0 {
+t.Fatalf("unexpected TTL: %#v", res)
+}
+ttl := uint64(10)
+out := fakeAPIObject("test")
+return &out, &ttl, nil
+})
+if err != nil {
+t.Fatalf("unexpected error: %v", err)
+}
+if s != "test" {
+t.Errorf("unexpected response: %#v", s)
+}
+if res, err := client.Get(key, false, false); err != nil || res == nil || res.Node.TTL != 10 {
+t.Fatalf("unexpected get: %v %#v", err, res)
+}
+err = helper.GuaranteedUpdate(key, &s, false, func(obj runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) {
+if *(obj.(*fakeAPIObject)) != "test" {
+t.Fatalf("unexpected existing object: %v", obj)
+}
+if res.TTL <= 1 {
+t.Fatalf("unexpected TTL: %#v", res)
+}
+out := fakeAPIObject("test2")
+return &out, nil, nil
+})
+if err != nil {
+t.Fatalf("unexpected error: %v", err)
+}
+if s != "test2" {
+t.Errorf("unexpected response: %#v", s)
+}
+if res, err := client.Get(key, false, false); err != nil || res == nil || res.Node.TTL <= 1 {
+t.Fatalf("unexpected get: %v %#v", err, res)
+}
+})
+}
func TestWatch(t *testing.T) {
client := framework.NewEtcdClient()
etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix())

View File

@@ -277,7 +277,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
-err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
+err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}