Remove pods from the assumed pod list when they are deleted

Daniel Smith
2015-03-27 15:33:03 -07:00
parent 31324a0ba5
commit fd952862c3
4 changed files with 156 additions and 24 deletions
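Only one of the four changed files is shown below: the integration test. The scheduler half of the change (in the files not shown) boils down to dropping a deleted pod from the list of pods the scheduler has "assumed" onto a node, so the pod stops occupying room there. A minimal sketch of that bookkeeping, under the big caveat that all names here (assumedPods, Assume, Forget) are hypothetical and not the commit's actual types:

// Hypothetical sketch of the scheduler-side fix; not the commit's real code.
package scheduler

import "sync"

type podKey struct{ namespace, name string }

// assumedPods tracks pods the scheduler has bound to a node but has not yet
// seen confirmed through the watch; in the meantime they count against node
// capacity and host ports.
type assumedPods struct {
	mu   sync.Mutex
	pods map[podKey]struct{}
}

func newAssumedPods() *assumedPods {
	return &assumedPods{pods: map[podKey]struct{}{}}
}

func (a *assumedPods) Assume(ns, name string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.pods[podKey{ns, name}] = struct{}{}
}

// Forget is the essence of the fix: called from the pod watch's delete
// handler, it drops a deleted pod so it no longer occupies room on a node.
func (a *assumedPods) Forget(ns, name string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	delete(a.pods, podKey{ns, name})
}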


@@ -71,8 +71,8 @@ var (
 type fakeKubeletClient struct{}
 
-func (fakeKubeletClient) GetPodStatus(host, podNamespace, podID string) (api.PodStatusResult, error) {
-	glog.V(3).Infof("Trying to get container info for %v/%v/%v", host, podNamespace, podID)
+func (fakeKubeletClient) GetPodStatus(host, podNamespace, podName string) (api.PodStatusResult, error) {
+	glog.V(3).Infof("Trying to get container info for %v/%v/%v", host, podNamespace, podName)
 	// This is a horrible hack to get around the fact that we can't provide
 	// different port numbers per kubelet...
 	var c client.PodInfoGetter
@@ -88,9 +88,9 @@ func (fakeKubeletClient) GetPodStatus(host, podNamespace, podID string) (api.Pod
 			Port: 10251,
 		}
 	default:
-		glog.Fatalf("Can't get info for: '%v', '%v - %v'", host, podNamespace, podID)
+		glog.Fatalf("Can't get info for: '%v', '%v - %v'", host, podNamespace, podName)
 	}
-	r, err := c.GetPodStatus("127.0.0.1", podNamespace, podID)
+	r, err := c.GetPodStatus("127.0.0.1", podNamespace, podName)
 	if err != nil {
 		return r, err
 	}
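For context, the "horrible hack" the earlier comment refers to is a dispatch on the host name to pick a per-kubelet port, since both fake kubelets answer on 127.0.0.1. The hunk only shows Port: 10251 and the Fatalf default; the elided cases presumably look roughly like this sketch, where the host names and the second port are assumptions:

	// Hypothetical reconstruction of the elided switch: one fake kubelet
	// per port, selected by host name.
	switch host {
	case "localhost":
		c = &client.HTTPKubeletClient{
			Client: http.DefaultClient,
			Port:   10250,
		}
	case "machine":
		c = &client.HTTPKubeletClient{
			Client: http.DefaultClient,
			Port:   10251,
		}
	default:
		glog.Fatalf("Can't get info for: '%v', '%v - %v'", host, podNamespace, podName)
	}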
@@ -304,29 +304,29 @@ func countEndpoints(eps *api.Endpoints) int {
 	return count
 }
 
-func podExists(c *client.Client, podNamespace string, podID string) wait.ConditionFunc {
+func podExists(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		_, err := c.Pods(podNamespace).Get(podID)
+		_, err := c.Pods(podNamespace).Get(podName)
 		return err == nil, nil
 	}
 }
 
-func podNotFound(c *client.Client, podNamespace string, podID string) wait.ConditionFunc {
+func podNotFound(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		_, err := c.Pods(podNamespace).Get(podID)
+		_, err := c.Pods(podNamespace).Get(podName)
 		return apierrors.IsNotFound(err), nil
 	}
 }
 
-func podRunning(c *client.Client, podNamespace string, podID string) wait.ConditionFunc {
+func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		pod, err := c.Pods(podNamespace).Get(podID)
+		pod, err := c.Pods(podNamespace).Get(podName)
 		if apierrors.IsNotFound(err) {
 			return false, nil
 		}
 		if err != nil {
 			// This could be a connection error so we want to retry, but log the error.
-			glog.Errorf("Error when reading pod %q: %v", podID, err)
+			glog.Errorf("Error when reading pod %q: %v", podName, err)
 			return false, nil
 		}
 		if pod.Status.Phase != api.PodRunning {
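Each of these helpers returns a wait.ConditionFunc: a closure that wait.Poll invokes on an interval until it returns true or a non-nil error. Returning (false, nil) on transient failures keeps the poll going. A short usage sketch matching the calls in the test added below:

	// Poll once per second, for up to ten seconds, until the pod is Running.
	cond := podRunning(client, api.NamespaceDefault, "phantom.foo")
	if err := wait.Poll(time.Second, time.Second*10, cond); err != nil {
		glog.Fatalf("pod never started running: %v", err)
	}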
@@ -814,6 +814,61 @@ func runServiceTest(client *client.Client) {
 	glog.Info("Service test passed.")
 }
 
+func runSchedulerNoPhantomPodsTest(client *client.Client) {
+	pod := &api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name:  "c1",
+					Image: "kubernetes/pause",
+					Ports: []api.ContainerPort{
+						{ContainerPort: 1234, HostPort: 9999},
+					},
+					ImagePullPolicy: api.PullIfNotPresent,
+				},
+			},
+		},
+	}
+
+	// Assuming we only have two kubelets, the third pod here won't schedule
+	// if the scheduler doesn't correctly handle the delete for the second
+	// pod (see the sketch after this hunk).
+	pod.ObjectMeta.Name = "phantom.foo"
+	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
+	if err != nil {
+		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
+	}
+	if err := wait.Poll(time.Second, time.Second*10, podRunning(client, foo.Namespace, foo.Name)); err != nil {
+		glog.Fatalf("FAILED: pod never started running %v", err)
+	}
+
+	pod.ObjectMeta.Name = "phantom.bar"
+	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
+	if err != nil {
+		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
+	}
+	if err := wait.Poll(time.Second, time.Second*10, podRunning(client, bar.Namespace, bar.Name)); err != nil {
+		glog.Fatalf("FAILED: pod never started running %v", err)
+	}
+
+	// Delete a pod to free up room.
+	err = client.Pods(api.NamespaceDefault).Delete(bar.Name)
+	if err != nil {
+		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
+	}
+
+	pod.ObjectMeta.Name = "phantom.baz"
+	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
+	if err != nil {
+		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
+	}
+	if err := wait.Poll(time.Second, time.Second*10, podRunning(client, baz.Namespace, baz.Name)); err != nil {
+		glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
+	}
+
+	glog.Info("Scheduler doesn't make phantom pods: test passed.")
+}
+
 type testFunc func(*client.Client)
 
 func addFlags(fs *pflag.FlagSet) {
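The mechanics behind the two-kubelet comment: every pod in this test requests HostPort 9999, so each node can run at most one of them. If the deleted phantom.bar lingers in the scheduler's assumed-pod list, port 9999 looks occupied on both nodes and phantom.baz can never schedule. A hedged sketch of that kind of port-conflict check, with illustrative names rather than the scheduler's actual predicate code:

// hostPortsConflict reports whether a candidate pod requests a host port
// already claimed by any pod counted on the node. If a deleted pod is left
// among the assumed pods passed in here, it wrongly blocks the port.
func hostPortsConflict(candidate *api.Pod, podsOnNode []*api.Pod) bool {
	used := map[int]bool{}
	for _, p := range podsOnNode {
		for _, c := range p.Spec.Containers {
			for _, port := range c.Ports {
				if port.HostPort != 0 {
					used[port.HostPort] = true
				}
			}
		}
	}
	for _, c := range candidate.Spec.Containers {
		for _, port := range c.Ports {
			if port.HostPort != 0 && used[port.HostPort] {
				return true
			}
		}
	}
	return false
}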
@@ -904,6 +959,11 @@ func main() {
 		glog.Fatalf("Expected 16 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
 	}
 	glog.Infof("OK - found created containers: %#v", createdConts.List())
+
+	// This test doesn't run with the others because it can't run in
+	// parallel, and it also schedules extra pods, which would throw off the
+	// pod-counting logic above.
+	runSchedulerNoPhantomPodsTest(kubeClient)
 }
 
 // ServeCachedManifestFile serves a file for kubelet to read.