Merge pull request #40385 from ncdc/shared-informers-02-swap-existing
Automatic merge from submit-queue

Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones. Follow-up commits will switch the use of one-off informers to shared informers. This is a precursor to #40097, which will switch one-off informers to shared informers for the majority of the code base (but not quite all of it...).

NOTE: this does create a second set of shared informers in the kube-controller-manager. This will be resolved back down to a single factory once #40097 is reviewed and merged.

There are a couple of places where I expanded the number of caches we wait for in the calls to `WaitForCacheSync` - please pay attention to those. I also added a commented-out wait in the attach/detach controller. If @kubernetes/sig-storage-pr-reviews is ok with enabling the waiting, I'll do it (I'll just need to tweak an integration test slightly).

@deads2k @sttts @smarterclayton @liggitt @soltysh @timothysc @lavalamp @wojtek-t @gmarek @sjenning @derekwaynecarr @kubernetes/sig-scalability-pr-reviews
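For reviewers less familiar with the generated informers, the sketch below shows the consumption pattern this PR standardizes on: one shared factory per process, typed group/version accessors, and a WaitForCacheSync gate before a controller acts on cache contents. The factory constructor and accessor names follow the diff below; the start/sync plumbing is illustrative, not a copy of the kube-controller-manager wiring.

	// Minimal sketch, assuming the generated informers package imported in
	// this diff ("k8s.io/kubernetes/pkg/client/informers/informers_generated")
	// and a clientset/stopCh provided by the caller.
	informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())

	dsInformer := informerFactory.Extensions().V1beta1().DaemonSets()
	podInformer := informerFactory.Core().V1().Pods()

	// Start every informer requested from the factory so far, then block
	// until each cache has completed its initial List before doing work.
	informerFactory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, dsInformer.Informer().HasSynced, podInformer.Informer().HasSynced) {
		return // never act on listers whose caches failed to sync
	}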
@@ -32,8 +32,8 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/securitycontext"
 )
 
@@ -147,11 +147,25 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin
 	}
 }
 
-func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) {
-	clientset := fake.NewSimpleClientset(initialObjects...)
-	informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
+type daemonSetsController struct {
+	*DaemonSetsController
+
+	dsStore   cache.Store
+	podStore  cache.Store
+	nodeStore cache.Store
+}
+
+func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *controller.FakePodControl, *fake.Clientset) {
+	clientset := fake.NewSimpleClientset(initialObjects...)
+	informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
 
-	manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
+	manager := NewDaemonSetsController(
+		informerFactory.Extensions().V1beta1().DaemonSets(),
+		informerFactory.Core().V1().Pods(),
+		informerFactory.Core().V1().Nodes(),
+		clientset,
+		0,
+	)
 	manager.eventRecorder = record.NewFakeRecorder(100)
 
 	manager.podStoreSynced = alwaysReady
@@ -159,7 +173,13 @@ func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController,
 	manager.dsStoreSynced = alwaysReady
 	podControl := &controller.FakePodControl{}
 	manager.podControl = podControl
-	return manager, podControl, clientset
+
+	return &daemonSetsController{
+		manager,
+		informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
+		informerFactory.Core().V1().Pods().Informer().GetStore(),
+		informerFactory.Core().V1().Nodes().Informer().GetStore(),
+	}, podControl, clientset
 }
 
 func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
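The new daemonSetsController wrapper above keeps the old test ergonomics: it embeds the real controller and exposes the generated informers' underlying stores, so fixtures added to a store are immediately visible to the controller, which reads from the same cache. The call sites in the rest of the file change only in that they drop the `.Store`/`.Indexer` indirection of the hand-written informers, e.g.:

	// Before: reaching into hand-written informer internals.
	addNodes(manager.nodeStore.Store, 0, 5, nil)
	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)

	// After: the wrapper fields are plain cache.Store values obtained from
	// Informer().GetStore(), so the helpers receive them directly.
	addNodes(manager.nodeStore, 0, 5, nil)
	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)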
@@ -171,7 +191,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont
 	}
 }
 
-func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
+func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
 		t.Errorf("Could not get key for daemon.")
@@ -182,7 +202,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *
 
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	manager, _, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 1, nil)
+	addNodes(manager.nodeStore, 0, 1, nil)
 	ds := newDaemonSet("foo")
 	// DeletedFinalStateUnknown should queue the embedded DS if found.
 	manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
@@ -195,7 +215,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
+	addNodes(manager.nodeStore, 0, 5, nil)
 	ds := newDaemonSet("foo")
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
@@ -269,7 +289,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	node := newNode("too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("100M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -286,7 +306,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 	node := newNode("too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("100M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -301,7 +321,7 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 	node := newNode("too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("100M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec:   podSpec,
 		Status: v1.PodStatus{Phase: v1.PodSucceeded},
 	})
@@ -318,7 +338,7 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
 	node := newNode("not-too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("200M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -334,7 +354,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 	node := newNode("not-too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("200M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -358,7 +378,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager, podControl, _ := newTestController()
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 
@@ -384,7 +404,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 	manager, podControl, _ := newTestController()
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels:    simpleDaemonSetLabel,
 			Namespace: metav1.NamespaceDefault,
@@ -418,7 +438,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 	manager, podControl, _ := newTestController()
 	node := newNode("no-port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		Spec: podSpec1,
 	})
 	ds := newDaemonSet("foo")
@@ -432,9 +452,9 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 // issue https://github.com/kubernetes/kubernetes/pull/23223
 func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	manager.nodeStore.Store.Add(newNode("node1", nil))
+	manager.nodeStore.Add(newNode("node1", nil))
 	// Create pod not controlled by a daemonset.
-	manager.podStore.Indexer.Add(&v1.Pod{
+	manager.podStore.Add(&v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels:    map[string]string{"bang": "boom"},
 			Namespace: metav1.NamespaceDefault,
@@ -465,11 +485,11 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
-	addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2)
-	addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5)
-	addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2)
+	addNodes(manager.nodeStore, 0, 5, nil)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
+	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 2)
+	addPods(manager.podStore, "node-3", simpleDaemonSetLabel, 5)
+	addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, 2)
 	ds := newDaemonSet("foo")
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
@@ -478,8 +498,8 @@ func TestDealsWithExistingPods(t *testing.T) {
 // Daemon with node selector should launch pods on nodes matching selector.
 func TestSelectorDaemonLaunchesPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 4, nil)
-	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+	addNodes(manager.nodeStore, 0, 4, nil)
+	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	daemon := newDaemonSet("foo")
 	daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager.dsStore.Add(daemon)
@@ -489,12 +509,12 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
 // Daemon with node selector should delete pods from nodes that do not satisfy selector.
 func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
-	addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
-	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1)
-	addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1)
+	addNodes(manager.nodeStore, 0, 5, nil)
+	addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
+	addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, 2)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 1)
+	addPods(manager.podStore, "node-4", simpleDaemonSetLabel, 1)
 	daemon := newDaemonSet("foo")
 	daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager.dsStore.Add(daemon)
@@ -504,16 +524,16 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
 func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
-	addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
-	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2)
-	addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4)
-	addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13)
-	addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4)
-	addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1)
-	addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1)
+	addNodes(manager.nodeStore, 0, 5, nil)
+	addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
+	addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 2)
+	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 4)
+	addPods(manager.podStore, "node-6", simpleDaemonSetLabel, 13)
+	addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, 4)
+	addPods(manager.podStore, "node-9", simpleDaemonSetLabel, 1)
+	addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, 1)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager.dsStore.Add(ds)
@@ -523,8 +543,8 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 // DaemonSet with node selector which does not match any node labels should not launch pods.
 func TestBadSelectorDaemonDoesNothing(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 4, nil)
-	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+	addNodes(manager.nodeStore, 0, 4, nil)
+	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
 	manager.dsStore.Add(ds)
@@ -534,7 +554,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
 // DaemonSet with node name should launch pod on node with corresponding name.
 func TestNameDaemonSetLaunchesPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
+	addNodes(manager.nodeStore, 0, 5, nil)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeName = "node-0"
 	manager.dsStore.Add(ds)
@@ -544,7 +564,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
 // DaemonSet with node name that does not exist should not launch pods.
 func TestBadNameDaemonSetDoesNothing(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 5, nil)
+	addNodes(manager.nodeStore, 0, 5, nil)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeName = "node-10"
 	manager.dsStore.Add(ds)
@@ -554,8 +574,8 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
 // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
 func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 4, nil)
-	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+	addNodes(manager.nodeStore, 0, 4, nil)
+	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	ds.Spec.Template.Spec.NodeName = "node-6"
@@ -566,8 +586,8 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
 func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 4, nil)
-	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+	addNodes(manager.nodeStore, 0, 4, nil)
+	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	ds.Spec.Template.Spec.NodeName = "node-0"
@@ -578,8 +598,8 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 // Daemon with node affinity should launch pods on nodes matching affinity.
 func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 	manager, podControl, _ := newTestController()
-	addNodes(manager.nodeStore.Store, 0, 4, nil)
-	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+	addNodes(manager.nodeStore, 0, 4, nil)
+	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	daemon := newDaemonSet("foo")
 	daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
 		NodeAffinity: &v1.NodeAffinity{
@@ -615,9 +635,9 @@ func TestNumberReadyStatus(t *testing.T) {
 		}
 		return false, nil, nil
 	})
-	addNodes(manager.nodeStore.Store, 0, 2, simpleNodeLabel)
-	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
-	addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
+	addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
+	addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
+	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
 	manager.dsStore.Add(daemon)
 
 	syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
@@ -626,7 +646,7 @@ func TestNumberReadyStatus(t *testing.T) {
 	}
 
 	selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
-	daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
+	daemonPods, _ := manager.podLister.Pods(daemon.Namespace).List(selector)
 	for _, pod := range daemonPods {
 		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
 		pod.Status.Conditions = append(pod.Status.Conditions, condition)
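One read path also changes above: listing pods for status now goes through the generated lister (podLister) rather than a method on the hand-written store wrapper. A hedged sketch of the same lookup with error handling (the podLister field name is taken from the diff; the surrounding test plumbing is assumed):

	selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
	if err != nil {
		t.Fatalf("invalid selector: %v", err)
	}
	// The lister serves reads from the same shared cache the informer maintains.
	daemonPods, err := manager.podLister.Pods(daemon.Namespace).List(selector)
	if err != nil {
		t.Fatalf("listing daemon pods: %v", err)
	}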
@@ -653,8 +673,8 @@ func TestObservedGeneration(t *testing.T) {
 		return false, nil, nil
 	})
 
-	addNodes(manager.nodeStore.Store, 0, 1, simpleNodeLabel)
-	addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
+	addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
+	addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
 	manager.dsStore.Add(daemon)
 
 	syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
@@ -679,9 +699,9 @@ func TestDaemonKillFailedPods(t *testing.T) {
 	for _, test := range tests {
 		t.Logf("test case: %s\n", test.test)
 		manager, podControl, _ := newTestController()
-		addNodes(manager.nodeStore.Store, 0, 1, nil)
-		addFailedPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numFailedPods)
-		addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numNormalPods)
+		addNodes(manager.nodeStore, 0, 1, nil)
+		addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numFailedPods)
+		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numNormalPods)
 		ds := newDaemonSet("foo")
 		manager.dsStore.Add(ds)
 		syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
@@ -782,9 +802,9 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		node := newNode("test-node", nil)
 		node.Status.Allocatable = allocatableResources("100M", "1")
 		manager, _, _ := newTestController()
-		manager.nodeStore.Store.Add(node)
+		manager.nodeStore.Add(node)
 		for _, p := range c.podsOnNode {
-			manager.podStore.Indexer.Add(p)
+			manager.podStore.Add(p)
 			p.Spec.NodeName = "test-node"
 		}
 		wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)