Delete all pods based on condition transition time.
@@ -75,8 +75,18 @@ const (
 	initialNodeStatusUpdateFrequency = 100 * time.Millisecond
 	nodeStatusUpdateFrequencyInc = 500 * time.Millisecond
 
-	// Node status update frequency and retry count. Note: be cautious when changing nodeStatusUpdateFrequency,
-	// it must work with nodecontroller.nodeMonitorGracePeriod.
+	// Node status update frequency and retry count.
+	// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
+	// in nodecontroller. There are several constraints:
+	// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
+	//    N means number of retries allowed for kubelet to post node status. It is pointless
+	//    to make nodeMonitorGracePeriod be less than nodeStatusUpdateFrequency, since there
+	//    will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
+	// 2. nodeMonitorGracePeriod can't be too large for user experience - larger value takes
+	//    longer for user to see up-to-date node status.
+	// 3. nodeStatusUpdateFrequency needs to be large enough for Kubelet to generate node
+	//    status. Kubelet may fail to update node status reliably if the value is too small,
+	//    as it takes time to gather all necessary node information.
 	nodeStatusUpdateFrequency = 2 * time.Second
 	nodeStatusUpdateRetry = 5
 )
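Constraint 1 in the new comment is just arithmetic over these constants: the nodecontroller's grace period must cover all of the kubelet's retry attempts. A minimal, self-contained sketch of that check (the helper and the sample nodeMonitorGracePeriod value are illustrative, not part of this commit; the real setting lives in nodecontroller):

package main

import (
	"fmt"
	"time"
)

const (
	nodeStatusUpdateFrequency = 2 * time.Second
	nodeStatusUpdateRetry     = 5
)

// minGracePeriod is a hypothetical helper expressing constraint 1: the
// grace period must span at least nodeStatusUpdateRetry posting attempts,
// otherwise a healthy-but-slow kubelet could be declared unready between
// two successful status updates.
func minGracePeriod() time.Duration {
	return nodeStatusUpdateRetry * nodeStatusUpdateFrequency
}

func main() {
	nodeMonitorGracePeriod := 8 * time.Second // illustrative value, deliberately too small
	if nodeMonitorGracePeriod < minGracePeriod() {
		fmt.Printf("nodeMonitorGracePeriod %v is below the minimum %v\n",
			nodeMonitorGracePeriod, minGracePeriod())
	}
}

With the constants above the minimum is 5 * 2s = 10s, so any nodeMonitorGracePeriod below 10 seconds would violate constraint 1.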
@@ -1837,20 +1847,23 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 		node.Spec.Capacity = CapacityFromMachineInfo(info)
 	}
 
+	currentTime := util.Now()
 	newCondition := api.NodeCondition{
 		Type:          api.NodeReady,
 		Status:        api.ConditionFull,
 		Reason:        fmt.Sprintf("kubelet is posting ready status"),
-		LastProbeTime: util.Now(),
+		LastProbeTime: currentTime,
 	}
 	updated := false
 	for i := range node.Status.Conditions {
 		if node.Status.Conditions[i].Type == api.NodeReady {
+			newCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
 			node.Status.Conditions[i] = newCondition
 			updated = true
 		}
 	}
 	if !updated {
+		newCondition.LastTransitionTime = currentTime
 		node.Status.Conditions = append(node.Status.Conditions, newCondition)
 	}
 
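The hunk above is the heart of the kubelet change: LastProbeTime moves on every post, while LastTransitionTime is carried over from the existing Ready condition and is only stamped with the current time when the condition first appears. A self-contained sketch of that bookkeeping, using a simplified stand-in type instead of api.NodeCondition and util.Time (names are illustrative):

package main

import (
	"fmt"
	"time"
)

// nodeCondition is a simplified stand-in for api.NodeCondition.
type nodeCondition struct {
	Type               string
	LastProbeTime      time.Time
	LastTransitionTime time.Time
}

// setReadyCondition mirrors the loop in tryUpdateNodeStatus: every call
// refreshes LastProbeTime, but LastTransitionTime is copied from the
// existing Ready condition and only set to now when no Ready condition
// exists yet. (Hypothetical helper for illustration.)
func setReadyCondition(conds []nodeCondition, now time.Time) []nodeCondition {
	newCond := nodeCondition{Type: "Ready", LastProbeTime: now}
	updated := false
	for i := range conds {
		if conds[i].Type == "Ready" {
			newCond.LastTransitionTime = conds[i].LastTransitionTime
			conds[i] = newCond
			updated = true
		}
	}
	if !updated {
		newCond.LastTransitionTime = now
		conds = append(conds, newCond)
	}
	return conds
}

func main() {
	t0 := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	conds := setReadyCondition(nil, t0)                     // first post stamps both times
	conds = setReadyCondition(conds, t0.Add(2*time.Second)) // later posts move the probe time only
	fmt.Println(conds[0].LastProbeTime, conds[0].LastTransitionTime)
}

Keeping the transition time stable is what lets the nodecontroller later measure how long the node has been in its current state, instead of how recently the kubelet phoned home.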
@@ -3102,10 +3102,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			Status: api.NodeStatus{
 				Conditions: []api.NodeCondition{
 					{
-						Type:          api.NodeReady,
-						Status:        api.ConditionFull,
-						Reason:        fmt.Sprintf("kubelet is posting ready status"),
-						LastProbeTime: util.Time{},
+						Type:               api.NodeReady,
+						Status:             api.ConditionFull,
+						Reason:             fmt.Sprintf("kubelet is posting ready status"),
+						LastProbeTime:      util.Time{},
+						LastTransitionTime: util.Time{},
 					},
 				},
 				NodeInfo: api.NodeSystemInfo{
@@ -3128,7 +3129,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	if updatedNode.Status.Conditions[0].LastProbeTime.IsZero() {
 		t.Errorf("unexpected zero last probe timestamp")
 	}
+	if updatedNode.Status.Conditions[0].LastTransitionTime.IsZero() {
+		t.Errorf("unexpected zero last transition timestamp")
+	}
 	updatedNode.Status.Conditions[0].LastProbeTime = util.Time{}
+	updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
 	if !reflect.DeepEqual(expectedNode, updatedNode) {
 		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
 	}
@@ -3151,10 +3156,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 		Status: api.NodeStatus{
 			Conditions: []api.NodeCondition{
 				{
-					Type:          api.NodeReady,
-					Status:        api.ConditionFull,
-					Reason:        fmt.Sprintf("kubelet is posting ready status"),
-					LastProbeTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+					Type:               api.NodeReady,
+					Status:             api.ConditionFull,
+					Reason:             fmt.Sprintf("kubelet is posting ready status"),
+					LastProbeTime:      util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+					LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				},
 			},
 		},
@@ -3173,10 +3179,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 		Status: api.NodeStatus{
 			Conditions: []api.NodeCondition{
 				{
-					Type:          api.NodeReady,
-					Status:        api.ConditionFull,
-					Reason:        fmt.Sprintf("kubelet is posting ready status"),
-					LastProbeTime: util.Time{}, // placeholder
+					Type:               api.NodeReady,
+					Status:             api.ConditionFull,
+					Reason:             fmt.Sprintf("kubelet is posting ready status"),
+					LastProbeTime:      util.Time{}, // placeholder
+					LastTransitionTime: util.Time{}, // placeholder
 				},
 			},
 			NodeInfo: api.NodeSystemInfo{
@@ -3196,11 +3203,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	if !ok {
 		t.Errorf("unexpected object type")
 	}
+	// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
 	if reflect.DeepEqual(updatedNode.Status.Conditions[0].LastProbeTime, util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)) {
-		t.Errorf("expected \n%v\n, got \n%v", updatedNode.Status.Conditions[0].LastProbeTime,
-			util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
+		t.Errorf("expected \n%v\n, got \n%v", util.Now(), util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
 	}
+	if !reflect.DeepEqual(updatedNode.Status.Conditions[0].LastTransitionTime, util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Errorf("expected \n%v\n, got \n%v", updatedNode.Status.Conditions[0].LastTransitionTime,
+			util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
+	}
 	updatedNode.Status.Conditions[0].LastProbeTime = util.Time{}
+	updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
 	if !reflect.DeepEqual(expectedNode, updatedNode) {
 		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
 	}
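For context on the commit title: the tests above pin down the kubelet side, while the nodecontroller side (not shown in this section) compares the Ready condition's LastTransitionTime against a grace period before deleting the node's pods. A hypothetical sketch of that decision, under the assumption that it reduces to a single duration comparison:

package main

import (
	"fmt"
	"time"
)

// shouldDeletePods sketches the nodecontroller-side check this commit
// enables: once a node's Ready condition has been false for longer than
// the grace period, measured from LastTransitionTime rather than from
// LastProbeTime, its pods become eligible for deletion. (Hypothetical
// helper; the real logic lives in nodecontroller.)
func shouldDeletePods(ready bool, lastTransition, now time.Time, gracePeriod time.Duration) bool {
	return !ready && now.Sub(lastTransition) > gracePeriod
}

func main() {
	notReadySince := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	now := notReadySince.Add(5 * time.Minute)
	fmt.Println(shouldDeletePods(false, notReadySince, now, 4*time.Minute)) // true: evict
}

Measuring from the transition time means pods are deleted based on how long the node has been not ready, not merely on when its status was last recorded.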