Use node out of disk condition in the scheduler while scheduling pods.

In the node controller, set the out-of-disk node condition to Unknown
when the kubelet has not reported node status for a long time. Update
the node controller unit tests accordingly.
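
A rough sketch of that behavior (not the controller's actual code: the
function name markStaleConditionsUnknown and the gracePeriod parameter
are illustrative, while the condition types, reason, and message mirror
the expectations in the test diff below):

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/api/unversioned"
    )

    // markStaleConditionsUnknown flips the Ready and OutOfDisk conditions
    // to Unknown when the kubelet has been silent for longer than the
    // grace period.
    func markStaleConditionsUnknown(node *api.Node, now unversioned.Time, gracePeriod time.Duration) {
        for i := range node.Status.Conditions {
            cond := &node.Status.Conditions[i]
            if cond.Type != api.NodeReady && cond.Type != api.NodeOutOfDisk {
                continue
            }
            if now.Time.Sub(cond.LastHeartbeatTime.Time) <= gracePeriod {
                continue
            }
            // Only record a transition the first time the condition
            // becomes Unknown.
            if cond.Status != api.ConditionUnknown {
                cond.Status = api.ConditionUnknown
                cond.Reason = "NodeStatusUnknown"
                cond.Message = "Kubelet stopped posting node status."
                cond.LastTransitionTime = now
            }
        }
    }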

Implement a node condition predicate function that checks whether a
given node satisfies the conditions the predicate defines; only nodes
that do are used for scheduling pods. The predicate takes both
NodeReady and NodeOutOfDisk into consideration when determining whether
a node is fit for scheduling pods.
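
A hedged sketch of such a predicate, continuing with the api package
from the sketch above (the name nodeIsSchedulable is hypothetical and
the real function in the scheduler factory may have a different
signature):

    // nodeIsSchedulable treats a node as fit for scheduling only if it
    // is Ready and is not reporting OutOfDisk.
    func nodeIsSchedulable(node api.Node) bool {
        for _, cond := range node.Status.Conditions {
            if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue {
                return false
            }
            if cond.Type == api.NodeOutOfDisk && cond.Status == api.ConditionTrue {
                return false
            }
        }
        return true
    }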

The predicate is then passed to the node lister in the scheduler factory
so that the lister can run it against the nodes while scheduling pods,
thereby omitting nodes that do not satisfy the predicate.
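
Illustration only of how a lister could apply such a predicate while
listing nodes for the scheduler; the helper below is hypothetical and
the actual lister type and method names may differ:

    // listSchedulableNodes filters the nodes handed to the scheduler
    // through the predicate, dropping any node that fails it.
    func listSchedulableNodes(all []api.Node, fit func(api.Node) bool) []api.Node {
        nodes := make([]api.Node, 0, len(all))
        for _, n := range all {
            if fit(n) {
                nodes = append(nodes, n)
            }
        }
        return nodes
    }

In the factory this amounts to handing something like nodeIsSchedulable
to the node lister so that every List call already excludes nodes that
are NotReady or out of disk.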

Also update the listers test.
Madhusudan.C.S
2015-10-22 12:47:43 -07:00
parent b74b2aa43e
commit ce257b5a0e
5 changed files with 171 additions and 42 deletions

@@ -18,7 +18,6 @@ package node
import (
"errors"
"fmt"
"sync"
"testing"
"time"
@@ -402,7 +401,15 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
Type: api.NodeReady,
Status: api.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
- Message: fmt.Sprintf("Kubelet never posted node status."),
+ Message: "Kubelet never posted node status.",
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
+ {
+ Type: api.NodeOutOfDisk,
+ Status: api.ConditionUnknown,
+ Reason: "NodeStatusNeverUpdated",
+ Message: "Kubelet never posted node status.",
+ LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+ LastTransitionTime: fakeNow,
+ },
@@ -447,6 +454,13 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
+ {
+ Type: api.NodeOutOfDisk,
+ Status: api.ConditionFalse,
+ // Node status hasn't been updated for 1hr.
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+ LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+ },
},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
@@ -471,6 +485,13 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
+ {
+ Type: api.NodeOutOfDisk,
+ Status: api.ConditionFalse,
+ // Node status hasn't been updated for 1hr.
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+ LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+ },
},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
@@ -488,8 +509,16 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
{
Type: api.NodeReady,
Status: api.ConditionUnknown,
Reason: "NodeStatusStopUpdated",
Message: fmt.Sprintf("Kubelet stopped posting node status."),
Reason: "NodeStatusUnknown",
Message: "Kubelet stopped posting node status.",
LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
+ {
+ Type: api.NodeOutOfDisk,
+ Status: api.ConditionUnknown,
+ Reason: "NodeStatusUnknown",
+ Message: "Kubelet stopped posting node status.",
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+ LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
+ },
@@ -542,7 +571,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
}
- for _, item := range table {
+ for i, item := range table {
nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, util.NewFakeRateLimiter(),
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
nodeController.now = func() unversioned.Time { return fakeNow }
@@ -560,8 +589,10 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
}
if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
t.Errorf("expected nodes %+v, got %+v", item.expectedNodes[0],
item.fakeNodeHandler.UpdatedNodes[0])
t.Errorf("Case[%d] unexpected nodes: %s", i, util.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0]))
}
+ if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) {
+ t.Errorf("Case[%d] unexpected nodes: %s", i, util.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0]))
+ }
}
}