kubelet: Make condition processing in one spot
The list of status conditions should be calculated all together; having it split across two places made review more complex. Readability only.
parent c2a6d07b8f
commit d7ee024cc5
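The diff below moves all condition handling into generateAPIPodStatus: conditions not owned by the kubelet are carried over from the pod's existing status, and the kubelet-owned conditions are appended afterwards in one place. The following standalone Go sketch mirrors that shape; mergeConditions, the package name, and the local podConditionByKubelet predicate (including the set of owned types it checks) are illustrative stand-ins, not the kubelet code itself — in the diff the real predicate is kubetypes.PodConditionByKubelet and the owned values come from the status package generators.

package conditions

import v1 "k8s.io/api/core/v1"

// podConditionByKubelet is a local stand-in for kubetypes.PodConditionByKubelet.
// The set of owned condition types shown here is an assumption for illustration.
func podConditionByKubelet(t v1.PodConditionType) bool {
	switch t {
	case v1.PodScheduled, v1.PodReady, v1.PodInitialized, v1.ContainersReady:
		return true
	}
	return false
}

// mergeConditions mirrors the structure of the new block in generateAPIPodStatus:
// keep every condition the kubelet does not own, then append the kubelet-owned
// conditions, all in one spot.
func mergeConditions(existing []v1.PodCondition, owned ...v1.PodCondition) []v1.PodCondition {
	merged := make([]v1.PodCondition, 0, len(existing)+len(owned))
	for _, c := range existing {
		if !podConditionByKubelet(c.Type) {
			// conditions set by other components (e.g. readiness gates) survive
			merged = append(merged, c)
		}
	}
	return append(merged, owned...)
}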
@@ -1421,21 +1421,26 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 		}
 	}
 
-	spec := &pod.Spec
-
 	// ensure the probe managers have up to date status for containers
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
-	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
-	// Status manager will take care of the LastTransitionTimestamp, either preserve
-	// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
-	// `PodScheduled` condition must be true.
+
+	// preserve all conditions not owned by the kubelet
+	s.Conditions = make([]v1.PodCondition, 0, len(pod.Status.Conditions)+1)
+	for _, c := range pod.Status.Conditions {
+		if !kubetypes.PodConditionByKubelet(c.Type) {
+			s.Conditions = append(s.Conditions, c)
+		}
+	}
+	// set all Kubelet-owned conditions
+	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, s.InitContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, v1.PodCondition{
 		Type:   v1.PodScheduled,
 		Status: v1.ConditionTrue,
 	})
 
+	// set HostIP and initialize PodIP/PodIPs for host network pods
 	if kl.kubeClient != nil {
 		hostIPs, err := kl.getHostIPsAnyWay()
 		if err != nil {
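As a usage example for the hunk above, the sketch below feeds a pod carrying a readiness-gate style condition from an external controller plus a stale kubelet-owned Ready condition through the illustrative mergeConditions helper: the foreign condition is carried over, while the kubelet-owned conditions are the freshly generated ones. It is written as an example test meant to sit in a _test.go file next to the sketch above; the condition type "example.com/feature-ready" is invented for illustration.

package conditions

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Example rebuilds the condition list the way the new generateAPIPodStatus
// block does: foreign conditions are preserved, kubelet-owned ones are
// regenerated rather than copied from the previous status.
func Example() {
	previous := []v1.PodCondition{
		{Type: "example.com/feature-ready", Status: v1.ConditionTrue}, // readiness gate, not kubelet-owned
		{Type: v1.PodReady, Status: v1.ConditionFalse},                // stale kubelet-owned value, dropped
	}
	merged := mergeConditions(previous,
		v1.PodCondition{Type: v1.PodInitialized, Status: v1.ConditionTrue},
		v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue},
		v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionTrue},
		v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionTrue},
	)
	for _, c := range merged {
		fmt.Printf("%s=%s\n", c.Type, c.Status)
	}
	// Output:
	// example.com/feature-ready=True
	// Initialized=True
	// Ready=True
	// ContainersReady=True
	// PodScheduled=True
}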
@@ -1541,13 +1546,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 		)
 	}
 
-	// Preserves conditions not controlled by kubelet
-	for _, c := range pod.Status.Conditions {
-		if !kubetypes.PodConditionByKubelet(c.Type) {
-			apiPodStatus.Conditions = append(apiPodStatus.Conditions, c)
-		}
-	}
-
 	return &apiPodStatus
 }
 
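The second hunk drops the duplicate preservation loop from convertStatusToAPIStatus, since generateAPIPodStatus now owns that step. The hypothetical test below exercises the illustrative mergeConditions helper from the sketch above and checks the property the single code path is now responsible for: a foreign condition comes through exactly once, and a kubelet-owned type appears only as appended by the one assembly pass.

package conditions

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

// TestMergeConditionsSingleOwner checks that condition assembly in one spot
// preserves foreign conditions exactly once and does not duplicate
// kubelet-owned types.
func TestMergeConditionsSingleOwner(t *testing.T) {
	previous := []v1.PodCondition{
		{Type: "example.com/feature-ready", Status: v1.ConditionTrue},
		{Type: v1.PodScheduled, Status: v1.ConditionTrue},
	}
	merged := mergeConditions(previous,
		v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionTrue},
	)
	counts := map[v1.PodConditionType]int{}
	for _, c := range merged {
		counts[c.Type]++
	}
	if counts["example.com/feature-ready"] != 1 {
		t.Errorf("expected the foreign condition to be preserved exactly once, got %d", counts["example.com/feature-ready"])
	}
	if counts[v1.PodScheduled] != 1 {
		t.Errorf("expected exactly one PodScheduled condition, got %d", counts[v1.PodScheduled])
	}
}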