Merge pull request #39355 from kargakis/update-rc-manager

Automatic merge from submit-queue

Share rc cache from the rc manager

@kubernetes/sig-apps-misc @hodovska
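For readers skimming the diff: the integration tests now build both the pod and ReplicationController informers from a single SharedInformerFactory and hand them to the replication manager, instead of letting the manager construct its own RC cache. Below is a minimal sketch of that wiring, based only on the calls visible in the diff; the helper name startRCManager and the client/stopCh parameters are illustrative and not part of this PR.

package integrationsketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/informers"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
)

// startRCManager wires the replication manager to shared informers the same
// way the updated tests below do. Illustrative helper only.
func startRCManager(client clientset.Interface, stopCh chan struct{}) {
	// One factory feeds both caches, so the RC store is shared rather than
	// owned by the manager.
	informerFactory := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
	podInformer := informerFactory.Pods().Informer()
	rcInformer := informerFactory.ReplicationControllers().Informer()

	// The manager now receives the rc informer instead of building its own store.
	rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, client, replicationcontroller.BurstReplicas, 4096, false)
	rm.SetEventRecorder(&record.FakeRecorder{})

	// Start the shared caches first, then the manager's workers.
	informerFactory.Start(stopCh)
	go rm.Run(3, stopCh)
}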
Authored by Kubernetes Submit Queue, committed by GitHub on 2017-01-04 05:18:29 -08:00
11 changed files with 241 additions and 272 deletions


@@ -31,8 +31,10 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
"k8s.io/kubernetes/pkg/fields"
@@ -82,8 +84,13 @@ func TestQuota(t *testing.T) {
 controllerCh := make(chan struct{})
 defer close(controllerCh)
-go replicationcontroller.NewReplicationManagerFromClientForIntegration(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
-Run(3, controllerCh)
+informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
+podInformer := informers.Pods().Informer()
+rcInformer := informers.ReplicationControllers().Informer()
+rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false)
+rm.SetEventRecorder(&record.FakeRecorder{})
+informers.Start(controllerCh)
+go rm.Run(3, controllerCh)
 resourceQuotaRegistry := quotainstall.NewRegistry(clientset, nil)
 groupKindsToReplenish := []schema.GroupKind{


@@ -123,7 +123,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 return ret, nil
 }
-func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
+func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
 masterConfig := framework.NewIntegrationTestMasterConfig()
 _, s := framework.RunAMaster(masterConfig)
@@ -133,22 +133,13 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
resyncPeriodFunc := func() time.Duration {
return resyncPeriod
}
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
rm := replication.NewReplicationManager(
podInformer,
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
resyncPeriodFunc,
replication.BurstReplicas,
4096,
enableGarbageCollector,
)
if err != nil {
t.Fatalf("Failed to create replication manager")
}
informers := informers.NewSharedInformerFactory(clientSet, nil, resyncPeriod)
podInformer := informers.Pods().Informer()
rcInformer := informers.ReplicationControllers().Informer()
rm := replication.NewReplicationManager(podInformer, rcInformer, clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
informers.Start(stopCh)
return s, rm, podInformer, clientSet
}
@@ -219,7 +210,8 @@ func TestAdoption(t *testing.T) {
 },
 }
 for i, tc := range testCases {
-s, rm, podInformer, clientSet := rmSetup(t, true)
+stopCh := make(chan struct{})
+s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
 ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
@@ -238,7 +230,6 @@ func TestAdoption(t *testing.T) {
t.Fatalf("Failed to create Pod: %v", err)
}
stopCh := make(chan struct{})
go podInformer.Run(stopCh)
waitToObservePods(t, podInformer, 1)
go rm.Run(5, stopCh)
@@ -296,7 +287,8 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 // We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
 // matches pod1 only; change the selector to match pod2 as well. Verify
 // there is only one pod left.
-s, rm, podInformer, clientSet := rmSetup(t, true)
+stopCh := make(chan struct{})
+s, rm, _, clientSet := rmSetup(t, stopCh, true)
 ns := framework.CreateTestingNamespace("update-selector-to-adopt", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
 rc := newRC("rc", ns.Name, 1)
@@ -309,8 +301,6 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 pod2.Labels["uniqueKey"] = "2"
 createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
-stopCh := make(chan struct{})
-go podInformer.Run(stopCh)
 go rm.Run(5, stopCh)
 waitRCStable(t, clientSet, rc, ns.Name)
@@ -336,7 +326,8 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 // matches pod1 and pod2; change the selector to match only pod1. Verify
 // that rc creates one more pod, so there are 3 pods. Also verify that
 // pod2's controllerRef is cleared.
-s, rm, podInformer, clientSet := rmSetup(t, true)
+stopCh := make(chan struct{})
+s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
 ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
 rc := newRC("rc", ns.Name, 2)
@@ -346,8 +337,6 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 pod2.Labels["uniqueKey"] = "2"
 createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
-stopCh := make(chan struct{})
-go podInformer.Run(stopCh)
 waitToObservePods(t, podInformer, 2)
 go rm.Run(5, stopCh)
 waitRCStable(t, clientSet, rc, ns.Name)
@@ -382,7 +371,8 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 // matches pod1 and pod2; change pod2's labels to non-matching. Verify
 // that rc creates one more pod, so there are 3 pods. Also verify that
 // pod2's controllerRef is cleared.
-s, rm, podInformer, clientSet := rmSetup(t, true)
+stopCh := make(chan struct{})
+s, rm, _, clientSet := rmSetup(t, stopCh, true)
 ns := framework.CreateTestingNamespace("update-label-to-remove-controllerref", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
 rc := newRC("rc", ns.Name, 2)
@@ -390,8 +380,6 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 pod2 := newMatchingPod("pod2", ns.Name)
 createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
-stopCh := make(chan struct{})
-go podInformer.Run(stopCh)
 go rm.Run(5, stopCh)
 waitRCStable(t, clientSet, rc, ns.Name)
@@ -424,7 +412,8 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 // matches pod1 only; change pod2's labels to be matching. Verify the RC
 // controller adopts pod2 and delete one of them, so there is only 1 pod
 // left.
-s, rm, podInformer, clientSet := rmSetup(t, true)
+stopCh := make(chan struct{})
+s, rm, _, clientSet := rmSetup(t, stopCh, true)
 ns := framework.CreateTestingNamespace("update-label-to-be-adopted", s, t)
 defer framework.DeleteTestingNamespace(ns, s, t)
 rc := newRC("rc", ns.Name, 1)
@@ -437,8 +426,6 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 pod2.Labels["uniqueKey"] = "2"
 createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
-stopCh := make(chan struct{})
-go podInformer.Run(stopCh)
 go rm.Run(5, stopCh)
 waitRCStable(t, clientSet, rc, ns.Name)