Don't double count graceful deletion.
@@ -893,6 +893,87 @@ func TestOverlappingRCs(t *testing.T) {
	}
}

func TestDeletionTimestamp(t *testing.T) {
	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10, 0)
	manager.podStoreSynced = alwaysReady

	controllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(controllerSpec)
	rcKey, err := controller.KeyFunc(controllerSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
	}
	pod := newPodList(nil, 1, api.PodPending, controllerSpec).Items[0]
	pod.DeletionTimestamp = &unversioned.Time{time.Now()}
	manager.expectations.SetExpectations(rcKey, 0, 1)

	// A pod added with a deletion timestamp should decrement deletions, not creations.
	manager.addPod(&pod)

	queueRC, _ := manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	podExp, exists, err := manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// An update from no deletion timestamp to having one should be treated
	// as a deletion.
	oldPod := newPodList(nil, 1, api.PodPending, controllerSpec).Items[0]
	manager.expectations.SetExpectations(rcKey, 0, 1)
	manager.updatePod(&oldPod, &pod)

	queueRC, _ = manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// An update to the pod (including an update to the deletion timestamp)
	// should not be counted as a second delete.
	manager.expectations.SetExpectations(rcKey, 0, 1)
	oldPod.DeletionTimestamp = &unversioned.Time{time.Now()}
	manager.updatePod(&oldPod, &pod)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// A pod with a non-nil deletion timestamp should also be ignored by the
	// delete handler, because it's already been counted in the update.
	manager.deletePod(&pod)
	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}

	// A pod with a nil timestamp should be counted as a deletion.
	pod.DeletionTimestamp = nil
	manager.deletePod(&pod)

	queueRC, _ = manager.queue.Get()
	if queueRC != rcKey {
		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
	}
	manager.queue.Done(rcKey)

	podExp, exists, err = manager.expectations.GetExpectations(rcKey)
	if !exists || err != nil || !podExp.Fulfilled() {
		t.Fatalf("Wrong expectations %+v", podExp)
	}
}

func BenchmarkGetPodControllerMultiNS(b *testing.B) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)