RC/RS: Use ControllerRef to route watch events.
This is part of the completion of ControllerRef, as described in the design proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/controller-ref.md#watches

This change also removes the need for the Pod->Controller mapping cache in RC and RS: that mapping is now persisted in the Pod's ControllerRef instead.
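For orientation before the diff: the tests below exercise routing of pod events by ControllerRef. When a pod event arrives, the manager first looks for an OwnerReference with Controller set to true and enqueues that owner directly; only orphaned pods (those with no ControllerRef) fall back to label-selector matching. Here is a minimal, self-contained sketch of that routing decision (the types and the controllerRefOf helper are illustrative stand-ins, not the Kubernetes API types or the functions added by this commit):

package main

import "fmt"

// OwnerReference mirrors the shape of the Kubernetes field used below;
// Controller == true marks the single managing controller.
type OwnerReference struct {
	Kind       string
	Name       string
	UID        string
	Controller *bool
}

type Pod struct {
	Name            string
	Labels          map[string]string
	OwnerReferences []OwnerReference
}

// controllerRefOf returns the OwnerReference marked Controller=true, if any.
func controllerRefOf(pod *Pod) *OwnerReference {
	for i := range pod.OwnerReferences {
		if ref := &pod.OwnerReferences[i]; ref.Controller != nil && *ref.Controller {
			return ref
		}
	}
	return nil
}

func main() {
	isController := true
	pod := &Pod{
		Name:   "pod-0",
		Labels: map[string]string{"foo": "bar"},
		OwnerReferences: []OwnerReference{
			{Kind: "ReplicationController", Name: "foo-rc", UID: "abc123", Controller: &isController},
		},
	}
	// Route by ControllerRef when present; fall back to label matching
	// (the old cache-based path) only for orphaned pods.
	if ref := controllerRefOf(pod); ref != nil {
		fmt.Printf("enqueue %s %q\n", ref.Kind, ref.Name)
	} else {
		fmt.Println("orphan: fall back to label-selector matching against pod.Labels")
	}
}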
@@ -167,11 +167,11 @@ type serverResponse struct {
 	obj interface{}
 }
 
-func NewReplicationManagerFromClient(kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int) (*ReplicationManager, coreinformers.PodInformer, coreinformers.ReplicationControllerInformer) {
+func newReplicationManagerFromClient(kubeClient clientset.Interface, burstReplicas int) (*ReplicationManager, coreinformers.PodInformer, coreinformers.ReplicationControllerInformer) {
 	informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
 	podInformer := informerFactory.Core().V1().Pods()
 	rcInformer := informerFactory.Core().V1().ReplicationControllers()
-	rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize)
+	rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas)
 	rm.podListerSynced = alwaysReady
 	rm.rcListerSynced = alwaysReady
 	return rm, podInformer, rcInformer
@@ -180,7 +180,7 @@ func NewReplicationManagerFromClient(kubeClient clientset.Interface, burstReplic
 func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// 2 running pods, a controller with 2 replicas, sync is a no-op
 	controllerSpec := newReplicationController(2)
@@ -195,7 +195,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 func TestSyncReplicationControllerDeletes(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 	manager.podControl = &fakePodControl
 
 	// 2 running pods and a controller with 1 replica, one pod delete expected
@@ -210,7 +210,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 	manager.podControl = &fakePodControl
 
 	received := make(chan string)
@@ -241,7 +241,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
 func TestSyncReplicationControllerCreates(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// A controller with 2 replicas and no pods in the store, 2 creates expected
 	rc := newReplicationController(2)
@@ -262,7 +262,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// Steady state for the replication controller, no Status.Replicas updates expected
 	activePods := 5
@@ -302,7 +302,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// Insufficient number of pods in the system, and Status.Replicas is wrong;
 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
@@ -347,7 +347,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 	defer testServer.Close()
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 	manager.podControl = &fakePodControl
 
 	controllerSpec := newReplicationController(2)
@@ -399,7 +399,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 }
 
 func TestPodControllerLookup(t *testing.T) {
-	manager, _, rcInformer := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), BurstReplicas, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), BurstReplicas)
 	testCases := []struct {
 		inRCs []*v1.ReplicationController
 		pod   *v1.Pod
@@ -447,7 +447,12 @@ func TestPodControllerLookup(t *testing.T) {
 		for _, r := range c.inRCs {
 			rcInformer.Informer().GetIndexer().Add(r)
 		}
-		if rc := manager.getPodController(c.pod); rc != nil {
+		if rcs := manager.getPodControllers(c.pod); rcs != nil {
+			if len(rcs) != 1 {
+				t.Errorf("len(rcs) = %v, want %v", len(rcs), 1)
+				continue
+			}
+			rc := rcs[0]
 			if c.outRCName != rc.Name {
 				t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
 			}
@@ -466,7 +471,7 @@ func TestWatchControllers(t *testing.T) {
 	informers := informers.NewSharedInformerFactory(c, controller.NoResyncPeriodFunc())
 	podInformer := informers.Core().V1().Pods()
 	rcInformer := informers.Core().V1().ReplicationControllers()
-	manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0)
+	manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas)
 	informers.Start(stopCh)
 
 	var testControllerSpec v1.ReplicationController
@@ -506,7 +511,7 @@ func TestWatchPods(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	c := &fake.Clientset{}
 	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// Put one rc and one pod into the controller's stores
 	testControllerSpec := newReplicationController(1)
@@ -547,7 +552,7 @@ func TestWatchPods(t *testing.T) {
 }
 
 func TestUpdatePods(t *testing.T) {
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(fake.NewSimpleClientset(), BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(fake.NewSimpleClientset(), BurstReplicas)
 
 	received := make(chan string)
 
@@ -565,23 +570,29 @@ func TestUpdatePods(t *testing.T) {
 	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 
 	// Put 2 rcs and one pod into the controller's stores
+	labelMap1 := map[string]string{"foo": "bar"}
 	testControllerSpec1 := newReplicationController(1)
+	testControllerSpec1.Spec.Selector = labelMap1
 	rcInformer.Informer().GetIndexer().Add(testControllerSpec1)
+	labelMap2 := map[string]string{"bar": "foo"}
 	testControllerSpec2 := *testControllerSpec1
-	testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
+	testControllerSpec2.Spec.Selector = labelMap2
 	testControllerSpec2.Name = "barfoo"
 	rcInformer.Informer().GetIndexer().Add(&testControllerSpec2)
 
-	// case 1: We put in the podLister a pod with labels matching
-	// testControllerSpec1, then update its labels to match testControllerSpec2.
-	// We expect to receive a sync request for both controllers.
+	isController := true
+	controllerRef1 := metav1.OwnerReference{UID: testControllerSpec1.UID, APIVersion: "v1", Kind: "ReplicationController", Name: testControllerSpec1.Name, Controller: &isController}
+	controllerRef2 := metav1.OwnerReference{UID: testControllerSpec2.UID, APIVersion: "v1", Kind: "ReplicationController", Name: testControllerSpec2.Name, Controller: &isController}
+
+	// case 1: Pod with a ControllerRef
 	pod1 := newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef1}
 	pod1.ResourceVersion = "1"
 	pod2 := pod1
-	pod2.Labels = testControllerSpec2.Spec.Selector
+	pod2.Labels = labelMap2
 	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
-	expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
+	expected := sets.NewString(testControllerSpec1.Name)
 	for _, name := range expected.List() {
 		t.Logf("Expecting update for %+v", name)
 		select {
@@ -590,17 +601,20 @@ func TestUpdatePods(t *testing.T) {
 				t.Errorf("Expected keys %#v got %v", expected, got)
 			}
 		case <-time.After(wait.ForeverTestTimeout):
-			t.Errorf("Expected update notifications for controllers within 100ms each")
+			t.Errorf("Expected update notifications for ReplicationControllers")
 		}
 	}
 
-	// case 2: pod1 in the podLister has labels matching testControllerSpec1.
-	// We update its labels to match no replication controller. We expect to
-	// receive a sync request for testControllerSpec1.
-	pod2.Labels = make(map[string]string)
+	// case 2: Remove ControllerRef (orphan). Expect to sync label-matching RC.
+	pod1 = newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+	pod1.ResourceVersion = "1"
+	pod1.Labels = labelMap2
+	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef2}
+	pod2 = pod1
+	pod2.OwnerReferences = nil
 	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
-	expected = sets.NewString(testControllerSpec1.Name)
+	expected = sets.NewString(testControllerSpec2.Name)
 	for _, name := range expected.List() {
 		t.Logf("Expecting update for %+v", name)
 		select {
@@ -609,7 +623,52 @@ func TestUpdatePods(t *testing.T) {
 				t.Errorf("Expected keys %#v got %v", expected, got)
 			}
 		case <-time.After(wait.ForeverTestTimeout):
-			t.Errorf("Expected update notifications for controllers within 100ms each")
+			t.Errorf("Expected update notifications for ReplicationControllers")
 		}
 	}
+
+	// case 2: Remove ControllerRef (orphan). Expect to sync both former owner and
+	// any label-matching RC.
+	pod1 = newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+	pod1.ResourceVersion = "1"
+	pod1.Labels = labelMap2
+	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef1}
+	pod2 = pod1
+	pod2.OwnerReferences = nil
+	pod2.ResourceVersion = "2"
+	manager.updatePod(&pod1, &pod2)
+	expected = sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
+	for _, name := range expected.List() {
+		t.Logf("Expecting update for %+v", name)
+		select {
+		case got := <-received:
+			if !expected.Has(got) {
+				t.Errorf("Expected keys %#v got %v", expected, got)
+			}
+		case <-time.After(wait.ForeverTestTimeout):
+			t.Errorf("Expected update notifications for ReplicationControllers")
+		}
+	}
+
+	// case 4: Keep ControllerRef, change labels. Expect to sync owning RC.
+	pod1 = newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+	pod1.ResourceVersion = "1"
+	pod1.Labels = labelMap1
+	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef2}
+	pod2 = pod1
+	pod2.Labels = labelMap2
+	pod2.ResourceVersion = "2"
+	manager.updatePod(&pod1, &pod2)
+	expected = sets.NewString(testControllerSpec2.Name)
+	for _, name := range expected.List() {
+		t.Logf("Expecting update for %+v", name)
+		select {
+		case got := <-received:
+			if !expected.Has(got) {
+				t.Errorf("Expected keys %#v got %v", expected, got)
+			}
+		case <-time.After(wait.ForeverTestTimeout):
+			t.Errorf("Expected update notifications for ReplicationControllers")
+		}
+	}
 }
@@ -624,7 +683,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
 	defer testServer.Close()
 
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
	rc := newReplicationController(1)
 	rcInformer.Informer().GetIndexer().Add(rc)
@@ -694,7 +753,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, burstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, burstReplicas)
 	manager.podControl = &fakePodControl
 
 	controllerSpec := newReplicationController(numReplicas)
@@ -755,6 +814,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 		// the rc is waiting for.
 		expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t))
 		podsToDelete := []*v1.Pod{}
+		isController := true
 		for _, key := range expectedDels.List() {
 			nsName := strings.Split(key, "/")
 			podsToDelete = append(podsToDelete, &v1.Pod{
@@ -762,6 +822,9 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 					Name:      nsName[1],
 					Namespace: nsName[0],
 					Labels:    controllerSpec.Spec.Selector,
+					OwnerReferences: []metav1.OwnerReference{
+						{UID: controllerSpec.UID, APIVersion: "v1", Kind: "ReplicationController", Name: controllerSpec.Name, Controller: &isController},
+					},
 				},
 			})
 		}
@@ -798,11 +861,15 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 			t.Fatalf("Waiting on unexpected number of deletes.")
 		}
 		nsName := strings.Split(expectedDel.List()[0], "/")
+		isController := true
 		lastPod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      nsName[1],
 				Namespace: nsName[0],
 				Labels:    controllerSpec.Spec.Selector,
+				OwnerReferences: []metav1.OwnerReference{
+					{UID: controllerSpec.UID, APIVersion: "v1", Kind: "ReplicationController", Name: controllerSpec.Name, Controller: &isController},
+				},
 			},
 		}
 		podInformer.Informer().GetIndexer().Delete(lastPod)
@@ -843,7 +910,7 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
 func TestRCSyncExpectations(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	fakePodControl := controller.FakePodControl{}
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 2, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, 2)
 	manager.podControl = &fakePodControl
 
 	controllerSpec := newReplicationController(2)
@@ -866,7 +933,7 @@ func TestRCSyncExpectations(t *testing.T) {
 
 func TestDeleteControllerAndExpectations(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 10, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, 10)
 
 	rc := newReplicationController(1)
 	rcInformer.Informer().GetIndexer().Add(rc)
@@ -919,36 +986,46 @@ func shuffle(controllers []*v1.ReplicationController) []*v1.ReplicationControlle
 func TestOverlappingRCs(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 
 	for i := 0; i < 5; i++ {
-		manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0)
+		manager, _, rcInformer := newReplicationManagerFromClient(c, 10)
 
-		// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
-		var controllers []*v1.ReplicationController
-		for j := 1; j < 10; j++ {
-			controllerSpec := newReplicationController(1)
-			controllerSpec.CreationTimestamp = metav1.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
-			controllerSpec.Name = string(uuid.NewUUID())
-			controllers = append(controllers, controllerSpec)
-		}
-		shuffledControllers := shuffle(controllers)
-		for j := range shuffledControllers {
-			rcInformer.Informer().GetIndexer().Add(shuffledControllers[j])
-		}
-		// Add a pod and make sure only the oldest rc is synced
-		pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod")
-		rcKey := getKey(controllers[0], t)
+		// Create 10 rcs, shuffled them randomly and insert them into the
+		// rc manager's store.
+		// All use the same CreationTimestamp since ControllerRef should be able
+		// to handle that.
+		var controllers []*v1.ReplicationController
+		timestamp := metav1.Date(2014, time.December, 0, 0, 0, 0, 0, time.Local)
+		for j := 1; j < 10; j++ {
+			controllerSpec := newReplicationController(1)
+			controllerSpec.CreationTimestamp = timestamp
+			controllerSpec.Name = fmt.Sprintf("rc%d", j)
+			controllers = append(controllers, controllerSpec)
+		}
+		shuffledControllers := shuffle(controllers)
+		for j := range shuffledControllers {
+			rcInformer.Informer().GetIndexer().Add(shuffledControllers[j])
+		}
+		// Add a pod with a ControllerRef and make sure only the corresponding
+		// ReplicationController is synced. Pick a RC in the middle since the old code
+		// used to sort by name if all timestamps were equal.
+		rc := controllers[3]
+		pods := newPodList(nil, 1, v1.PodPending, rc, "pod")
+		pod := &pods.Items[0]
+		isController := true
+		pod.OwnerReferences = []metav1.OwnerReference{
+			{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &isController},
+		}
+		rcKey := getKey(rc, t)
 
-		manager.addPod(&pods.Items[0])
-		queueRC, _ := manager.queue.Get()
-		if queueRC != rcKey {
-			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
-		}
+		manager.addPod(pod)
+		queueRC, _ := manager.queue.Get()
+		if queueRC != rcKey {
+			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
+		}
 	}
 }
 
 func TestDeletionTimestamp(t *testing.T) {
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(c, 10)
 
 	controllerSpec := newReplicationController(1)
 	rcInformer.Informer().GetIndexer().Add(controllerSpec)
@@ -995,11 +1072,15 @@ func TestDeletionTimestamp(t *testing.T) {
 
 	// An update to the pod (including an update to the deletion timestamp)
 	// should not be counted as a second delete.
+	isController := true
 	secondPod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: pod.Namespace,
 			Name:      "secondPod",
 			Labels:    pod.Labels,
+			OwnerReferences: []metav1.OwnerReference{
+				{UID: controllerSpec.UID, APIVersion: "v1", Kind: "ReplicationController", Name: controllerSpec.Name, Controller: &isController},
+			},
 		},
 	}
 	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(secondPod)})
@@ -1037,7 +1118,7 @@ func TestDeletionTimestamp(t *testing.T) {
 
 func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(client, BurstReplicas)
 
 	const nsNum = 1000
 
@@ -1076,14 +1157,14 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 
 	for i := 0; i < b.N; i++ {
 		for _, pod := range pods {
-			manager.getPodController(&pod)
+			manager.getPodControllers(&pod)
 		}
 	}
 }
 
 func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0)
+	manager, _, rcInformer := newReplicationManagerFromClient(client, BurstReplicas)
 
 	const rcNum = 1000
 	const replicaNum = 3
@@ -1116,7 +1197,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 
 	for i := 0; i < b.N; i++ {
 		for _, pod := range pods {
-			manager.getPodController(&pod)
+			manager.getPodControllers(&pod)
 		}
 	}
 }
@@ -1125,7 +1206,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer) {
 	c := fakeclientset.NewSimpleClientset(objs...)
 	fakePodControl = &controller.FakePodControl{}
-	manager, podInformer, rcInformer = NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer = newReplicationManagerFromClient(c, BurstReplicas)
 	manager.podControl = fakePodControl
 	return manager, fakePodControl, podInformer, rcInformer
 }
@@ -1327,7 +1408,7 @@ func TestReadyReplicas(t *testing.T) {
 	defer testServer.Close()
 
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
 	rc := newReplicationController(2)
@@ -1365,7 +1446,7 @@ func TestAvailableReplicas(t *testing.T) {
 	defer testServer.Close()
 
 	c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
+	manager, podInformer, rcInformer := newReplicationManagerFromClient(c, BurstReplicas)
 
 	// Status.Replica should update to match number of pods in system, 1 new pod should be created.
 	rc := newReplicationController(2)