Merge pull request #35839 from Random-Liu/add-cri-runtime-status
Automatic merge from submit-queue

CRI: Add Status into CRI.

For https://github.com/kubernetes/kubernetes/issues/35701. Fixes https://github.com/kubernetes/kubernetes/issues/35701.

This PR adds a `Status` call to CRI, with `RuntimeStatus` defined as follows:

```protobuf
message RuntimeCondition {
    // Type of runtime condition.
    optional string type = 1;
    // Status of the condition, one of true/false.
    optional bool status = 2;
    // Brief reason for the condition's last transition.
    optional string reason = 3;
    // Human readable message indicating details about last transition.
    optional string message = 4;
}

message RuntimeStatus {
    // Conditions is an array of current observed runtime conditions.
    repeated RuntimeCondition conditions = 1;
}
```

Currently, only `conditions` is included in `RuntimeStatus`, and the definition is almost the same as `NodeCondition` and `PodCondition` in the K8s API.

@yujuhong @feiskyer @bprashanth If this makes sense, I'll send a follow-up PR to let dockershim return `RuntimeStatus` and let kubelet make use of it.

@yifan-gu @euank Does this make sense for rkt?

/cc @kubernetes/sig-node
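To make the intended kubelet-side semantics concrete before the diff, here is a minimal, self-contained Go sketch of how conditions like `RuntimeReady` and `NetworkReady` can be folded into a single readiness decision. The type and helper names here are illustrative assumptions, not the PR's actual `kubecontainer` definitions:

```go
package main

import "fmt"

// Illustrative Go-side mirror of the RuntimeStatus protobuf message
// above; names are assumptions for this sketch, not the PR's code.
type RuntimeConditionType string

const (
	RuntimeReady RuntimeConditionType = "RuntimeReady"
	NetworkReady RuntimeConditionType = "NetworkReady"
)

type RuntimeCondition struct {
	Type    RuntimeConditionType
	Status  bool
	Reason  string
	Message string
}

type RuntimeStatus struct {
	Conditions []RuntimeCondition
}

// condition returns the condition of the given type, or nil if absent.
// A nil receiver models a runtime that returned no status at all.
func (s *RuntimeStatus) condition(t RuntimeConditionType) *RuntimeCondition {
	if s == nil {
		return nil
	}
	for i := range s.Conditions {
		if s.Conditions[i].Type == t {
			return &s.Conditions[i]
		}
	}
	return nil
}

// runtimeHealthy mirrors the rule the tests below assert: a nil or
// empty status, or a false/missing RuntimeReady or NetworkReady
// condition, means the kubelet must not report the node Ready.
func runtimeHealthy(s *RuntimeStatus) bool {
	for _, t := range []RuntimeConditionType{RuntimeReady, NetworkReady} {
		if c := s.condition(t); c == nil || !c.Status {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(runtimeHealthy(nil)) // false → KubeletNotReady
	fmt.Println(runtimeHealthy(&RuntimeStatus{Conditions: []RuntimeCondition{
		{Type: RuntimeReady, Status: true},
		{Type: NetworkReady, Status: true},
	}})) // true → KubeletReady
}
```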
The commit updates `TestUpdateNodeStatusWithRuntimeStateError` in the kubelet unit tests:

```diff
@@ -762,7 +762,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		},
 	}
 
-	checkNodeStatus := func(status api.ConditionStatus, reason, message string) {
+	checkNodeStatus := func(status api.ConditionStatus, reason string) {
 		kubeClient.ClearActions()
 		if err := kubelet.updateNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -795,11 +795,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
 			t.Errorf("unexpected node condition order. NodeReady should be last.")
 		}
+		if updatedNode.Status.Conditions[lastIndex].Message == "" {
+			t.Errorf("unexpected empty condition message")
+		}
+		updatedNode.Status.Conditions[lastIndex].Message = ""
 		expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
 			Type:               api.NodeReady,
 			Status:             status,
 			Reason:             reason,
-			Message:            message,
 			LastHeartbeatTime:  unversioned.Time{},
 			LastTransitionTime: unversioned.Time{},
 		}
@@ -808,23 +811,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		}
 	}
 
-	readyMessage := "kubelet is posting ready status"
-	downMessage := "container runtime is down"
-
+	// TODO(random-liu): Refactor the unit test to be table driven test.
 	// Should report kubelet not ready if the runtime check is out of date
 	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
 	kubelet.updateRuntimeUp()
-	checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
 
 	// Should report kubelet ready if the runtime check is updated
 	clock.SetTime(time.Now())
 	kubelet.updateRuntimeUp()
-	checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage)
+	checkNodeStatus(api.ConditionTrue, "KubeletReady")
 
 	// Should report kubelet not ready if the runtime check is out of date
 	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
 	kubelet.updateRuntimeUp()
-	checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
 
 	// Should report kubelet not ready if the runtime check failed
 	fakeRuntime := testKubelet.fakeRuntime
@@ -832,7 +833,51 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
 	clock.SetTime(time.Now())
 	kubelet.updateRuntimeUp()
-	checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
+
+	// Test cri integration.
+	kubelet.kubeletConfiguration.ExperimentalRuntimeIntegrationType = "cri"
+	fakeRuntime.StatusErr = nil
+
+	// Should report node not ready if runtime status is nil.
+	fakeRuntime.RuntimeStatus = nil
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
+
+	// Should report node not ready if runtime status is empty.
+	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
+
+	// Should report node not ready if RuntimeReady is false.
+	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
+		Conditions: []kubecontainer.RuntimeCondition{
+			{Type: kubecontainer.RuntimeReady, Status: false},
+			{Type: kubecontainer.NetworkReady, Status: true},
+		},
+	}
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
+
+	// Should report node ready if RuntimeReady is true.
+	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
+		Conditions: []kubecontainer.RuntimeCondition{
+			{Type: kubecontainer.RuntimeReady, Status: true},
+			{Type: kubecontainer.NetworkReady, Status: true},
+		},
+	}
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionTrue, "KubeletReady")
+
+	// Should report node not ready if NetworkReady is false.
+	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
+		Conditions: []kubecontainer.RuntimeCondition{
+			{Type: kubecontainer.RuntimeReady, Status: true},
+			{Type: kubecontainer.NetworkReady, Status: false},
+		},
+	}
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
 }
 
 func TestUpdateNodeStatusError(t *testing.T) {
```
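The diff leaves a `TODO(random-liu)` to make this test table driven. A minimal sketch of what that shape could look like, with stand-in types and a hypothetical `evaluate` helper replacing the real fixtures (`fakeRuntime`, `kubelet`, `checkNodeStatus`):

```go
package kubelet_test

import "testing"

// evaluate is a stand-in for the updateRuntimeUp → updateNodeStatus
// round trip; a nil map models a nil or empty RuntimeStatus. The rule
// matches what the diff asserts: every required condition must be
// present and true for the node to be reported Ready.
func evaluate(conds map[string]bool) string {
	for _, key := range []string{"RuntimeReady", "NetworkReady"} {
		if ok, present := conds[key]; !present || !ok {
			return "KubeletNotReady"
		}
	}
	return "KubeletReady"
}

func TestRuntimeStatusTableDriven(t *testing.T) {
	for _, tc := range []struct {
		desc  string
		conds map[string]bool
		want  string
	}{
		{"nil runtime status", nil, "KubeletNotReady"},
		{"empty runtime status", map[string]bool{}, "KubeletNotReady"},
		{"runtime not ready", map[string]bool{"RuntimeReady": false, "NetworkReady": true}, "KubeletNotReady"},
		{"all conditions ready", map[string]bool{"RuntimeReady": true, "NetworkReady": true}, "KubeletReady"},
		{"network not ready", map[string]bool{"RuntimeReady": true, "NetworkReady": false}, "KubeletNotReady"},
	} {
		if got := evaluate(tc.conds); got != tc.want {
			t.Errorf("%s: got %q, want %q", tc.desc, got, tc.want)
		}
	}
}
```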