Merge pull request #4048 from mikedanese/ready
Support readiness checks
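For context, a minimal sketch of how the new field composes with the existing liveness probe (hypothetical values; only the `LivenessProbe`/`ReadinessProbe` fields and `InitialDelaySeconds` come from this diff, the rest is illustrative):

```go
container := api.Container{
	Name:  "frontend",
	Image: "example/frontend",
	// A failing liveness probe restarts the container.
	LivenessProbe: &api.Probe{InitialDelaySeconds: 30},
	// A failing readiness probe only removes the container's pod from
	// matching services' endpoints (see the v1beta1 field description below).
	ReadinessProbe: &api.Probe{InitialDelaySeconds: 5},
}
```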
@@ -89,6 +89,7 @@ func (fakeKubeletClient) GetPodStatus(host, podNamespace, podID string) (api.Pod
     r.Status.PodIP = "1.2.3.4"
     m := make(api.PodInfo)
     for k, v := range r.Status.Info {
+        v.Ready = true
         v.PodIP = "1.2.3.4"
         m[k] = v
     }
pkg/api/types.go (104 lines changed)
@@ -339,10 +339,11 @@ type Container struct {
     Ports []Port `json:"ports,omitempty"`
     Env []EnvVar `json:"env,omitempty"`
     // Compute resource requirements.
     Resources ResourceRequirements `json:"resources,omitempty"`
     VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"`
     LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+    ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
     Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
     // Optional: Defaults to /dev/termination-log
     TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
     // Optional: Default to false.
@@ -380,27 +381,16 @@ type Lifecycle struct {
 
 // The below types are used by kube_client and api_server.
 
-// PodPhase is a label for the condition of a pod at the current time.
-type PodPhase string
+type ConditionStatus string
 
-// These are the valid statuses of pods.
+// These are valid condition statuses. "ConditionFull" means a resource is in the condition;
+// "ConditionNone" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
 const (
-    // PodPending means the pod has been accepted by the system, but one or more of the containers
-    // has not been started. This includes time before being bound to a node, as well as time spent
-    // pulling images onto the host.
-    PodPending PodPhase = "Pending"
-    // PodRunning means the pod has been bound to a node and all of the containers have been started.
-    // At least one container is still running or is in the process of being restarted.
-    PodRunning PodPhase = "Running"
-    // PodSucceeded means that all containers in the pod have voluntarily terminated
-    // with a container exit code of 0, and the system is not going to restart any of these containers.
-    PodSucceeded PodPhase = "Succeeded"
-    // PodFailed means that all containers in the pod have terminated, and at least one container has
-    // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
-    PodFailed PodPhase = "Failed"
-    // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
-    // to an error in communicating with the host of the pod.
-    PodUnknown PodPhase = "Unknown"
+    ConditionFull ConditionStatus = "Full"
+    ConditionNone ConditionStatus = "None"
+    ConditionUnknown ConditionStatus = "Unknown"
 )
 
 type ContainerStateWaiting struct {
@@ -434,6 +424,8 @@ type ContainerStatus struct {
     // TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
     // defined for container?
     State ContainerState `json:"state,omitempty"`
+    // Ready specifies whether the container has passed its readiness check.
+    Ready bool `json:"ready"`
     // Note that this is calculated from dead containers. But those containers are subject to
     // garbage collection. This value will get capped at 5 by GC.
     RestartCount int `json:"restartCount"`
@@ -446,6 +438,44 @@ type ContainerStatus struct {
     ContainerID string `json:"containerID,omitempty" description:"container's ID in the format 'docker://<container_id>'"`
 }
 
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+    // PodPending means the pod has been accepted by the system, but one or more of the containers
+    // has not been started. This includes time before being bound to a node, as well as time spent
+    // pulling images onto the host.
+    PodPending PodPhase = "Pending"
+    // PodRunning means the pod has been bound to a node and all of the containers have been started.
+    // At least one container is still running or is in the process of being restarted.
+    PodRunning PodPhase = "Running"
+    // PodSucceeded means that all containers in the pod have voluntarily terminated
+    // with a container exit code of 0, and the system is not going to restart any of these containers.
+    PodSucceeded PodPhase = "Succeeded"
+    // PodFailed means that all containers in the pod have terminated, and at least one container has
+    // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+    PodFailed PodPhase = "Failed"
+    // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+    // to an error in communicating with the host of the pod.
+    PodUnknown PodPhase = "Unknown"
+)
+
+type PodConditionKind string
+
+// These are valid conditions of pod.
+const (
+    // PodReady means the pod is able to service requests and should be added to the
+    // load balancing pools of all matching services.
+    PodReady PodConditionKind = "Ready"
+)
+
+// TODO: add LastTransitionTime, Reason, Message to match NodeCondition api.
+type PodCondition struct {
+    Kind PodConditionKind `json:"kind"`
+    Status ConditionStatus `json:"status"`
+}
+
 // PodInfo contains one entry for every container with available info.
 type PodInfo map[string]ContainerStatus
 
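Taken together: a pod whose containers have all passed their readiness checks reports the following through the new `Conditions` field added to `PodStatus` in the next hunk (illustrative; this is what `getPodReadyCondition` in the kubelet hunk further down produces):

```go
status.Conditions = []api.PodCondition{{
	Kind:   api.PodReady,
	Status: api.ConditionFull, // api.ConditionNone once any container is unready
}}
```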
@@ -516,8 +546,8 @@ type PodSpec struct {
 // PodStatus represents information about the status of a pod. Status may trail the actual
 // state of a system.
 type PodStatus struct {
     Phase PodPhase `json:"phase,omitempty"`
+    Conditions []PodCondition `json:"Condition,omitempty"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty"`
 
@@ -759,25 +789,13 @@ const (
     NodeReady NodeConditionKind = "Ready"
 )
 
-type NodeConditionStatus string
-
-// These are valid condition status. "ConditionFull" means node is in the condition;
-// "ConditionNone" means node is not in the condition; "ConditionUnknown" means kubernetes
-// can't decide if node is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
-    ConditionFull NodeConditionStatus = "Full"
-    ConditionNone NodeConditionStatus = "None"
-    ConditionUnknown NodeConditionStatus = "Unknown"
-)
-
 type NodeCondition struct {
     Kind NodeConditionKind `json:"kind"`
-    Status NodeConditionStatus `json:"status"`
+    Status ConditionStatus `json:"status"`
     LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
     LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
     Reason string `json:"reason,omitempty"`
     Message string `json:"message,omitempty"`
 }
 
 // NodeResources is an object for conveying resource information about a node.
@@ -181,6 +181,9 @@ func init() {
             if err := s.Convert(&in.Phase, &out.Status, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.Conditions, &out.Conditions, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Info, &out.Info, 0); err != nil {
                 return err
             }
@@ -194,6 +197,9 @@ func init() {
             if err := s.Convert(&in.Status, &out.Phase, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.Conditions, &out.Conditions, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Info, &out.Info, 0); err != nil {
                 return err
             }
@@ -489,6 +495,9 @@ func init() {
             if err := s.Convert(&in.LivenessProbe, &out.LivenessProbe, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.ReadinessProbe, &out.ReadinessProbe, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Lifecycle, &out.Lifecycle, 0); err != nil {
                 return err
             }
@@ -569,6 +578,9 @@ func init() {
             if err := s.Convert(&in.LivenessProbe, &out.LivenessProbe, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.ReadinessProbe, &out.ReadinessProbe, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Lifecycle, &out.Lifecycle, 0); err != nil {
                 return err
             }
@@ -281,10 +281,11 @@ type Container struct {
     // Optional: Defaults to unlimited.
     CPU int `json:"cpu,omitempty" description:"CPU share in thousandths of a core"`
     // Optional: Defaults to unlimited.
     Memory int64 `json:"memory,omitempty" description:"memory limit in bytes; defaults to unlimited"`
     VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" description:"pod volumes to mount into the container's filesystem"`
     LivenessProbe *LivenessProbe `json:"livenessProbe,omitempty" description:"periodic probe of container liveness; container will be restarted if the probe fails"`
+    ReadinessProbe *LivenessProbe `json:"readinessProbe,omitempty" description:"periodic probe of container service readiness; container will be removed from service endpoints if the probe fails"`
     Lifecycle *Lifecycle `json:"lifecycle,omitempty" description:"actions that the management system should take in response to container lifecycle events"`
     // Optional: Defaults to /dev/termination-log
     TerminationMessagePath string `json:"terminationMessagePath,omitempty" description:"path at which the file to which the container's termination message will be written is mounted into the container's filesystem; message written is intended to be brief final status, such as an assertion failure message; defaults to /dev/termination-log"`
     // Optional: Default to false.
@@ -352,6 +353,18 @@ type TypeMeta struct {
     Annotations map[string]string `json:"annotations,omitempty" description:"map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about the object"`
 }
 
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionFull" means a resource is in the condition;
+// "ConditionNone" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+    ConditionFull ConditionStatus = "Full"
+    ConditionNone ConditionStatus = "None"
+    ConditionUnknown ConditionStatus = "Unknown"
+)
+
 // PodStatus represents a status of a pod.
 type PodStatus string
 
@@ -400,6 +413,7 @@ type ContainerStatus struct {
     // TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
     // defined for container?
     State ContainerState `json:"state,omitempty" description:"details about the container's current condition"`
+    Ready bool `json:"ready" description:"specifies whether the container has passed its readiness probe"`
     // Note that this is calculated from dead containers. But those containers are subject to
     // garbage collection. This value will get capped at 5 by GC.
     RestartCount int `json:"restartCount" description:"the number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed"`
@@ -412,6 +426,21 @@ type ContainerStatus struct {
     ContainerID string `json:"containerID,omitempty" description:"container's ID in the format 'docker://<container_id>'"`
 }
 
+type PodConditionKind string
+
+// These are valid conditions of pod.
+const (
+    // PodReady means the pod is able to service requests and should be added to the
+    // load balancing pools of all matching services.
+    PodReady PodConditionKind = "Ready"
+)
+
+// TODO: add LastTransitionTime, Reason, Message to match NodeCondition api.
+type PodCondition struct {
+    Kind PodConditionKind `json:"kind"`
+    Status ConditionStatus `json:"status"`
+}
+
 // PodInfo contains one entry for every container with available info.
 type PodInfo map[string]ContainerStatus
 
@@ -440,8 +469,9 @@ type RestartPolicy struct {
 
 // PodState is the state of a pod, used as either input (desired state) or output (current state).
 type PodState struct {
     Manifest ContainerManifest `json:"manifest,omitempty" description:"manifest of containers and volumes comprising the pod"`
     Status PodStatus `json:"status,omitempty" description:"current condition of the pod, Waiting, Running, or Terminated"`
+    Conditions []PodCondition `json:"Condition,omitempty" description:"current service state of pod"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
     Host string `json:"host,omitempty" description:"host to which the pod is assigned; empty if not yet scheduled"`
@@ -604,25 +634,13 @@ const (
     NodeReady NodeConditionKind = "Ready"
 )
 
-type NodeConditionStatus string
-
-// These are valid condition status. "ConditionFull" means node is in the condition;
-// "ConditionNone" means node is not in the condition; "ConditionUnknown" means kubernetes
-// can't decide if node is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
-    ConditionFull NodeConditionStatus = "Full"
-    ConditionNone NodeConditionStatus = "None"
-    ConditionUnknown NodeConditionStatus = "Unknown"
-)
-
 type NodeCondition struct {
     Kind NodeConditionKind `json:"kind" description:"kind of the condition, one of reachable, ready"`
-    Status NodeConditionStatus `json:"status" description:"status of the condition, one of full, none, unknown"`
+    Status ConditionStatus `json:"status" description:"status of the condition, one of full, none, unknown"`
     LastProbeTime util.Time `json:"lastProbeTime,omitempty" description:"last time the condition was probed"`
     LastTransitionTime util.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"`
     Reason string `json:"reason,omitempty" description:"(brief) reason for the condition's last transition"`
     Message string `json:"message,omitempty" description:"human readable message indicating details about last transition"`
 }
 
 // NodeResources represents resources on a Kubernetes system node
@@ -341,6 +341,9 @@ func init() {
             if err := s.Convert(&in.LivenessProbe, &out.LivenessProbe, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.ReadinessProbe, &out.ReadinessProbe, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Lifecycle, &out.Lifecycle, 0); err != nil {
                 return err
             }
@@ -423,6 +426,9 @@ func init() {
             if err := s.Convert(&in.LivenessProbe, &out.LivenessProbe, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.ReadinessProbe, &out.ReadinessProbe, 0); err != nil {
+                return err
+            }
             if err := s.Convert(&in.Lifecycle, &out.Lifecycle, 0); err != nil {
                 return err
             }
@@ -475,6 +481,9 @@ func init() {
             if err := s.Convert(&in.Info, &out.Info, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.Conditions, &out.Conditions, 0); err != nil {
+                return err
+            }
             out.Message = in.Message
             out.Host = in.Host
             out.HostIP = in.HostIP
@@ -488,6 +497,9 @@ func init() {
             if err := s.Convert(&in.Info, &out.Info, 0); err != nil {
                 return err
             }
+            if err := s.Convert(&in.Conditions, &out.Conditions, 0); err != nil {
+                return err
+            }
             out.Message = in.Message
             out.Host = in.Host
             out.HostIP = in.HostIP
@@ -240,10 +240,11 @@ type Container struct {
     // Optional: Defaults to unlimited.
     CPU int `json:"cpu,omitempty" description:"CPU share in thousandths of a core"`
     // Optional: Defaults to unlimited.
     Memory int64 `json:"memory,omitempty" description:"memory limit in bytes; defaults to unlimited"`
     VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" description:"pod volumes to mount into the container's filesystem"`
     LivenessProbe *LivenessProbe `json:"livenessProbe,omitempty" description:"periodic probe of container liveness; container will be restarted if the probe fails"`
+    ReadinessProbe *LivenessProbe `json:"readinessProbe,omitempty" description:"periodic probe of container service readiness; container will be removed from service endpoints if the probe fails"`
     Lifecycle *Lifecycle `json:"lifecycle,omitempty" description:"actions that the management system should take in response to container lifecycle events"`
     // Optional: Defaults to /dev/termination-log
     TerminationMessagePath string `json:"terminationMessagePath,omitempty" description:"path at which the file to which the container's termination message will be written is mounted into the container's filesystem; message written is intended to be brief final status, such as an assertion failure message; defaults to /dev/termination-log"`
     // Optional: Default to false.
@@ -316,6 +317,18 @@ type TypeMeta struct {
     Annotations map[string]string `json:"annotations,omitempty" description:"map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about the object"`
 }
 
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionFull" means a resource is in the condition;
+// "ConditionNone" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+    ConditionFull ConditionStatus = "Full"
+    ConditionNone ConditionStatus = "None"
+    ConditionUnknown ConditionStatus = "Unknown"
+)
+
 // PodStatus represents a status of a pod.
 type PodStatus string
 
@@ -364,6 +377,7 @@ type ContainerStatus struct {
     // TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
     // defined for container?
     State ContainerState `json:"state,omitempty" description:"details about the container's current condition"`
+    Ready bool `json:"ready" description:"specifies whether the container has passed its readiness probe"`
     // Note that this is calculated from dead containers. But those containers are subject to
     // garbage collection. This value will get capped at 5 by GC.
     RestartCount int `json:"restartCount" description:"the number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed"`
@@ -376,6 +390,21 @@ type ContainerStatus struct {
     ContainerID string `json:"containerID,omitempty" description:"container's ID in the format 'docker://<container_id>'"`
 }
 
+type PodConditionKind string
+
+// These are valid conditions of pod.
+const (
+    // PodReady means the pod is able to service requests and should be added to the
+    // load balancing pools of all matching services.
+    PodReady PodConditionKind = "Ready"
+)
+
+// TODO: add LastTransitionTime, Reason, Message to match NodeCondition api.
+type PodCondition struct {
+    Kind PodConditionKind `json:"kind"`
+    Status ConditionStatus `json:"status"`
+}
+
 // PodInfo contains one entry for every container with available info.
 type PodInfo map[string]ContainerStatus
 
@@ -404,8 +433,9 @@ type RestartPolicy struct {
 
 // PodState is the state of a pod, used as either input (desired state) or output (current state).
 type PodState struct {
     Manifest ContainerManifest `json:"manifest,omitempty" description:"manifest of containers and volumes comprising the pod"`
     Status PodStatus `json:"status,omitempty" description:"current condition of the pod, Waiting, Running, or Terminated"`
+    Conditions []PodCondition `json:"Condition,omitempty" description:"current service state of pod"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
     Host string `json:"host,omitempty" description:"host to which the pod is assigned; empty if not yet scheduled"`
@@ -568,25 +598,13 @@ const (
     NodeReady NodeConditionKind = "Ready"
 )
 
-type NodeConditionStatus string
-
-// These are valid condition status. "ConditionFull" means node is in the condition;
-// "ConditionNone" means node is not in the condition; "ConditionUnknown" means kubernetes
-// can't decide if node is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
-    ConditionFull NodeConditionStatus = "Full"
-    ConditionNone NodeConditionStatus = "None"
-    ConditionUnknown NodeConditionStatus = "Unknown"
-)
-
 type NodeCondition struct {
     Kind NodeConditionKind `json:"kind" description:"kind of the condition, one of reachable, ready"`
-    Status NodeConditionStatus `json:"status" description:"status of the condition, one of full, none, unknown"`
+    Status ConditionStatus `json:"status" description:"status of the condition, one of full, none, unknown"`
     LastProbeTime util.Time `json:"lastProbeTime,omitempty" description:"last time the condition was probed"`
     LastTransitionTime util.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"`
     Reason string `json:"reason,omitempty" description:"(brief) reason for the condition's last transition"`
     Message string `json:"message,omitempty" description:"human readable message indicating details about last transition"`
 }
 
 // NodeResources represents resources on a Kubernetes system node
@@ -358,13 +358,14 @@ type Container struct {
     // Optional: Defaults to whatever is defined in the image.
     Command []string `json:"command,omitempty"`
     // Optional: Defaults to Docker's default.
     WorkingDir string `json:"workingDir,omitempty"`
     Ports []Port `json:"ports,omitempty"`
     Env []EnvVar `json:"env,omitempty"`
     Resources ResourceRequirements `json:"resources,omitempty" description:"Compute Resources required by this container"`
     VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"`
     LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+    ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
     Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
     // Optional: Defaults to /dev/termination-log
     TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
     // Optional: Default to false.
@@ -400,27 +401,16 @@ type Lifecycle struct {
     PreStop *Handler `json:"preStop,omitempty"`
 }
 
-// PodPhase is a label for the condition of a pod at the current time.
-type PodPhase string
+type ConditionStatus string
 
-// These are the valid states of pods.
+// These are valid condition statuses. "ConditionFull" means a resource is in the condition;
+// "ConditionNone" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
 const (
-    // PodPending means the pod has been accepted by the system, but one or more of the containers
-    // has not been started. This includes time before being bound to a node, as well as time spent
-    // pulling images onto the host.
-    PodPending PodPhase = "Pending"
-    // PodRunning means the pod has been bound to a node and all of the containers have been started.
-    // At least one container is still running or is in the process of being restarted.
-    PodRunning PodPhase = "Running"
-    // PodSucceeded means that all containers in the pod have voluntarily terminated
-    // with a container exit code of 0, and the system is not going to restart any of these containers.
-    PodSucceeded PodPhase = "Succeeded"
-    // PodFailed means that all containers in the pod have terminated, and at least one container has
-    // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
-    PodFailed PodPhase = "Failed"
-    // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
-    // to an error in communicating with the host of the pod.
-    PodUnknown PodPhase = "Unknown"
+    ConditionFull ConditionStatus = "Full"
+    ConditionNone ConditionStatus = "None"
+    ConditionUnknown ConditionStatus = "Unknown"
 )
 
 type ContainerStateWaiting struct {
@@ -454,6 +444,7 @@ type ContainerStatus struct {
     // TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
     // defined for container?
     State ContainerState `json:"state,omitempty"`
+    Ready bool `json:"ready"`
     // Note that this is calculated from dead containers. But those containers are subject to
     // garbage collection. This value will get capped at 5 by GC.
     RestartCount int `json:"restartCount"`
@@ -468,6 +459,44 @@ type ContainerStatus struct {
     ImageID string `json:"imageID" description:"ID of the container's image"`
 }
 
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+    // PodPending means the pod has been accepted by the system, but one or more of the containers
+    // has not been started. This includes time before being bound to a node, as well as time spent
+    // pulling images onto the host.
+    PodPending PodPhase = "Pending"
+    // PodRunning means the pod has been bound to a node and all of the containers have been started.
+    // At least one container is still running or is in the process of being restarted.
+    PodRunning PodPhase = "Running"
+    // PodSucceeded means that all containers in the pod have voluntarily terminated
+    // with a container exit code of 0, and the system is not going to restart any of these containers.
+    PodSucceeded PodPhase = "Succeeded"
+    // PodFailed means that all containers in the pod have terminated, and at least one container has
+    // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+    PodFailed PodPhase = "Failed"
+    // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+    // to an error in communicating with the host of the pod.
+    PodUnknown PodPhase = "Unknown"
+)
+
+type PodConditionKind string
+
+// These are valid conditions of pod.
+const (
+    // PodReady means the pod is able to service requests and should be added to the
+    // load balancing pools of all matching services.
+    PodReady PodConditionKind = "Ready"
+)
+
+// TODO: add LastTransitionTime, Reason, Message to match NodeCondition api.
+type PodCondition struct {
+    Kind PodConditionKind `json:"kind"`
+    Status ConditionStatus `json:"status"`
+}
+
 // PodInfo contains one entry for every container with available info.
 type PodInfo map[string]ContainerStatus
 
@@ -521,7 +550,8 @@ type PodSpec struct {
 // PodStatus represents information about the status of a pod. Status may trail the actual
 // state of a system.
 type PodStatus struct {
     Phase PodPhase `json:"phase,omitempty"`
+    Conditions []PodCondition `json:"Condition,omitempty"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty"`
 
@@ -793,25 +823,13 @@ const (
     NodeReady NodeConditionKind = "Ready"
 )
 
-type NodeConditionStatus string
-
-// These are valid condition status. "ConditionFull" means node is in the condition;
-// "ConditionNone" means node is not in the condition; "ConditionUnknown" means kubernetes
-// can't decide if node is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
-    ConditionFull NodeConditionStatus = "Full"
-    ConditionNone NodeConditionStatus = "None"
-    ConditionUnknown NodeConditionStatus = "Unknown"
-)
-
 type NodeCondition struct {
     Kind NodeConditionKind `json:"kind"`
-    Status NodeConditionStatus `json:"status"`
+    Status ConditionStatus `json:"status"`
     LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
     LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
     Reason string `json:"reason,omitempty"`
     Message string `json:"message,omitempty"`
 }
 
 // ResourceName is the name identifying various resources in a ResourceList.
@@ -120,6 +120,7 @@ func (f *FakeDockerClient) StartContainer(id string, hostConfig *docker.HostConf
         ID: id,
         Config: &docker.Config{Image: "testimage"},
         HostConfig: hostConfig,
+        State: docker.State{Running: true},
     }
     return f.Err
 }
@@ -55,7 +55,6 @@ const defaultChanSize = 1024
 const minShares = 2
 const sharesPerCPU = 1024
 const milliCPUToCPU = 1000
-const maxRetries int = 3
 
 // SyncHandler is an interface implemented by Kubelet, for testability
 type SyncHandler interface {
@@ -121,6 +120,8 @@ func NewMainKubelet(
         clusterDNS: clusterDNS,
         serviceLister: serviceLister,
         masterServiceNamespace: masterServiceNamespace,
+        prober: newProbeHolder(),
+        readiness: newReadinessStates(),
     }
 
     if err := klet.setupDataDirs(); err != nil {
@@ -197,6 +198,11 @@ type Kubelet struct {
 
     // Volume plugins.
     volumePluginMgr volume.PluginMgr
+
+    // probe runner holder
+    prober probeHolder
+    // container readiness state holder
+    readiness *readinessStates
 }
 
 // getRootDir returns the full path to the directory under which kubelet can
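The `probeHolder` and `readinessStates` types are added elsewhere in this PR and are not shown in this excerpt. A minimal sketch of `readinessStates`, assuming only the operations visible here (`newReadinessStates`, `set`, `remove`, `IsReady`, and the `states` map the tests inspect), might look like:

```go
// Sketch only; the real implementation is not part of this excerpt.
// IsReady takes the whole ContainerStatus because ContainerID is stored in the
// "docker://<container_id>" format (see types.go), so the prefix is assumed
// to be stripped before the lookup keyed by plain docker ID.
type readinessStates struct {
	sync.Mutex
	states map[string]bool // docker container ID -> last readiness result
}

func newReadinessStates() *readinessStates {
	return &readinessStates{states: map[string]bool{}}
}

func (r *readinessStates) set(ID string, ready bool) {
	r.Lock()
	defer r.Unlock()
	r.states[ID] = ready
}

func (r *readinessStates) remove(ID string) {
	r.Lock()
	defer r.Unlock()
	delete(r.states, ID)
}

func (r *readinessStates) IsReady(status api.ContainerStatus) bool {
	r.Lock()
	defer r.Unlock()
	return r.states[strings.TrimPrefix(status.ContainerID, "docker://")]
}
```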
@@ -876,6 +882,7 @@ func (kl *Kubelet) killContainer(dockerContainer *docker.APIContainers) error {
 
 func (kl *Kubelet) killContainerByID(ID, name string) error {
     glog.V(2).Infof("Killing container with id %q and name %q", ID, name)
+    kl.readiness.remove(ID)
     err := kl.dockerClient.StopContainer(ID, 10)
     if len(name) == 0 {
         return err
@@ -1048,17 +1055,29 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
             // look for changes in the container.
             if hash == 0 || hash == expectedHash {
                 // TODO: This should probably be separated out into a separate goroutine.
-                healthy, err := kl.probeLiveness(podFullName, uid, podStatus, container, dockerContainer)
+                // If the container's liveness probe is unsuccessful, set readiness to false. If liveness is successful, do a readiness check and set
+                // readiness accordingly. If the initial delay since container creation on the liveness probe has not passed, the probe will return Success.
+                // If the initial delay on the readiness probe has not passed, the probe will return Failure.
+                ready := probe.Unknown
+                live, err := kl.probeContainer(container.LivenessProbe, podFullName, uid, podStatus, container, dockerContainer, probe.Success)
+                if live == probe.Success {
+                    ready, _ = kl.probeContainer(container.ReadinessProbe, podFullName, uid, podStatus, container, dockerContainer, probe.Failure)
+                }
+                if ready == probe.Success {
+                    kl.readiness.set(dockerContainer.ID, true)
+                } else {
+                    kl.readiness.set(dockerContainer.ID, false)
+                }
                 if err != nil {
                     glog.V(1).Infof("health check errored: %v", err)
                     containersToKeep[containerID] = empty{}
                     continue
                 }
-                if healthy == probe.Success {
+                if live == probe.Success {
                     containersToKeep[containerID] = empty{}
                     continue
                 }
-                glog.V(1).Infof("pod %q container %q is unhealthy. Container will be killed and re-created.", podFullName, container.Name, healthy)
+                glog.V(1).Infof("pod %q container %q is unhealthy. Container will be killed and re-created.", podFullName, container.Name, live)
             } else {
                 glog.V(1).Infof("pod %q container %q hash changed (%d vs %d). Container will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
             }
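`probeContainer` is also added elsewhere in this PR; the comment above pins down its contract: a nil probe, or a probe still inside its initial delay, yields the caller-supplied default result (`probe.Success` for liveness, `probe.Failure` for readiness). A sketch under that assumption, reusing the initial-delay check from the `probeLiveness` function this PR deletes below:

```go
// Sketch of the implied contract, not the actual implementation.
func (kl *Kubelet) probeContainer(p *api.Probe, podFullName string, podUID types.UID,
	status api.PodStatus, container api.Container,
	dockerContainer *docker.APIContainers, defaultResult probe.Result) (probe.Result, error) {
	if p == nil {
		return defaultResult, nil
	}
	if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
		return defaultResult, nil
	}
	// ...otherwise run the probe's handler and return its probe.Result...
	return probe.Unknown, nil
}
```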
@@ -1083,6 +1102,10 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
         glog.Errorf("Error listing recent containers:%s", dockerContainerName)
         // TODO(dawnchen): error handling here?
     }
+    // set dead containers to unready state
+    for _, c := range recentContainers {
+        kl.readiness.remove(c.ID)
+    }
 
     if len(recentContainers) > 0 && pod.Spec.RestartPolicy.Always == nil {
         if pod.Spec.RestartPolicy.Never != nil {
@@ -1098,6 +1121,7 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
                 continue
             }
         }
+
     }
 
     glog.V(3).Infof("Container with name %s doesn't exist, creating %#v", dockerContainerName)
@@ -1487,6 +1511,31 @@ func getPhase(spec *api.PodSpec, info api.PodInfo) api.PodPhase {
     }
 }
 
+// getPodReadyCondition returns ready condition if all containers in a pod are ready, else it returns an unready condition.
+func getPodReadyCondition(spec *api.PodSpec, info api.PodInfo) []api.PodCondition {
+    ready := []api.PodCondition{{
+        Kind: api.PodReady,
+        Status: api.ConditionFull,
+    }}
+    unready := []api.PodCondition{{
+        Kind: api.PodReady,
+        Status: api.ConditionNone,
+    }}
+    if info == nil {
+        return unready
+    }
+    for _, container := range spec.Containers {
+        if containerStatus, ok := info[container.Name]; ok {
+            if !containerStatus.Ready {
+                return unready
+            }
+        } else {
+            return unready
+        }
+    }
+    return ready
+}
+
 // GetPodStatus returns information from Docker about the containers in a pod
 func (kl *Kubelet) GetPodStatus(podFullName string, uid types.UID) (api.PodStatus, error) {
     var spec api.PodSpec
@@ -1499,8 +1548,15 @@ func (kl *Kubelet) GetPodStatus(podFullName string, uid types.UID) (api.PodStatu
 
     info, err := dockertools.GetDockerPodInfo(kl.dockerClient, spec, podFullName, uid)
 
+    for _, c := range spec.Containers {
+        containerStatus := info[c.Name]
+        containerStatus.Ready = kl.readiness.IsReady(containerStatus)
+        info[c.Name] = containerStatus
+    }
+
     var podStatus api.PodStatus
     podStatus.Phase = getPhase(&spec, info)
+    podStatus.Conditions = append(podStatus.Conditions, getPodReadyCondition(&spec, info)...)
     netContainerInfo, found := info[dockertools.PodInfraContainerName]
     if found {
         podStatus.PodIP = netContainerInfo.PodIP
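With that wiring, anything consuming `api.PodStatus` can gate on the pod-level condition rather than per-container state, e.g. (illustrative):

```go
// Sketch: how a caller might gate endpoint membership on the new condition.
for _, c := range status.Conditions {
	if c.Kind == api.PodReady && c.Status == api.ConditionFull {
		// safe to keep the pod in matching services' load-balancing pools
	}
}
```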
@@ -1512,23 +1568,6 @@ func (kl *Kubelet) GetPodStatus(podFullName string, uid types.UID) (api.PodStatu
     return podStatus, err
 }
 
-func (kl *Kubelet) probeLiveness(podFullName string, podUID types.UID, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (healthStatus probe.Result, err error) {
-    // Give the container 60 seconds to start up.
-    if container.LivenessProbe == nil {
-        return probe.Success, nil
-    }
-    if time.Now().Unix()-dockerContainer.Created < container.LivenessProbe.InitialDelaySeconds {
-        return probe.Success, nil
-    }
-    for i := 0; i < maxRetries; i++ {
-        healthStatus, err = kl.probeContainer(container.LivenessProbe, podFullName, podUID, status, container)
-        if healthStatus == probe.Success {
-            return
-        }
-    }
-    return healthStatus, err
-}
-
 // Returns logs of current machine.
 func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
     // TODO: whitelist logs we are willing to serve
@@ -66,6 +66,7 @@ func newTestKubelet(t *testing.T) (*Kubelet, *dockertools.FakeDockerClient) {
     kubelet.sourceReady = func(source string) bool { return true }
     kubelet.masterServiceNamespace = api.NamespaceDefault
     kubelet.serviceLister = testServiceLister{}
+    kubelet.readiness = newReadinessStates()
     if err := kubelet.setupDataDirs(); err != nil {
         t.Fatalf("can't initialize kubelet data dirs: %v", err)
     }
@@ -254,31 +255,7 @@ func TestKubeletDirsCompat(t *testing.T) {
 }
 
 func TestKillContainerWithError(t *testing.T) {
-    fakeDocker := &dockertools.FakeDockerClient{
-        Err: fmt.Errorf("sample error"),
-        ContainerList: []docker.APIContainers{
-            {
-                ID: "1234",
-                Names: []string{"/k8s_foo_qux_1234_42"},
-            },
-            {
-                ID: "5678",
-                Names: []string{"/k8s_bar_qux_5678_42"},
-            },
-        },
-    }
-    kubelet, _ := newTestKubelet(t)
-    kubelet.dockerClient = fakeDocker
-    err := kubelet.killContainer(&fakeDocker.ContainerList[0])
-    if err == nil {
-        t.Errorf("expected error, found nil")
-    }
-    verifyCalls(t, fakeDocker, []string{"stop"})
-}
-
-func TestKillContainer(t *testing.T) {
-    kubelet, fakeDocker := newTestKubelet(t)
-    fakeDocker.ContainerList = []docker.APIContainers{
+    containers := []docker.APIContainers{
         {
             ID: "1234",
             Names: []string{"/k8s_foo_qux_1234_42"},
@@ -288,15 +265,63 @@ func TestKillContainer(t *testing.T) {
 			Names: []string{"/k8s_bar_qux_5678_42"},
 		},
 	}
+	fakeDocker := &dockertools.FakeDockerClient{
+		Err:           fmt.Errorf("sample error"),
+		ContainerList: append([]docker.APIContainers{}, containers...),
+	}
+	kubelet, _ := newTestKubelet(t)
+	for _, c := range fakeDocker.ContainerList {
+		kubelet.readiness.set(c.ID, true)
+	}
+	kubelet.dockerClient = fakeDocker
+	err := kubelet.killContainer(&fakeDocker.ContainerList[0])
+	if err == nil {
+		t.Errorf("expected error, found nil")
+	}
+	verifyCalls(t, fakeDocker, []string{"stop"})
+	killedContainer := containers[0]
+	liveContainer := containers[1]
+	if _, found := kubelet.readiness.states[killedContainer.ID]; found {
+		t.Errorf("expected container entry ID '%v' to not be found. states: %+v", killedContainer.ID, kubelet.readiness.states)
+	}
+	if _, found := kubelet.readiness.states[liveContainer.ID]; !found {
+		t.Errorf("expected container entry ID '%v' to be found. states: %+v", liveContainer.ID, kubelet.readiness.states)
+	}
+}
+
+func TestKillContainer(t *testing.T) {
+	containers := []docker.APIContainers{
+		{
+			ID:    "1234",
+			Names: []string{"/k8s_foo_qux_1234_42"},
+		},
+		{
+			ID:    "5678",
+			Names: []string{"/k8s_bar_qux_5678_42"},
+		},
+	}
+	kubelet, fakeDocker := newTestKubelet(t)
+	fakeDocker.ContainerList = append([]docker.APIContainers{}, containers...)
 	fakeDocker.Container = &docker.Container{
 		Name: "foobar",
 	}
+	for _, c := range fakeDocker.ContainerList {
+		kubelet.readiness.set(c.ID, true)
+	}
+
 	err := kubelet.killContainer(&fakeDocker.ContainerList[0])
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	verifyCalls(t, fakeDocker, []string{"stop"})
+	killedContainer := containers[0]
+	liveContainer := containers[1]
+	if _, found := kubelet.readiness.states[killedContainer.ID]; found {
+		t.Errorf("expected container entry ID '%v' to not be found. states: %+v", killedContainer.ID, kubelet.readiness.states)
+	}
+	if _, found := kubelet.readiness.states[liveContainer.ID]; !found {
+		t.Errorf("expected container entry ID '%v' to be found. states: %+v", liveContainer.ID, kubelet.readiness.states)
+	}
 }
 
 type channelReader struct {
@@ -2559,3 +2584,96 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) {
 		}
 	}
 }
+
+func TestGetPodReadyCondition(t *testing.T) {
+	ready := []api.PodCondition{{
+		Kind:   api.PodReady,
+		Status: api.ConditionFull,
+	}}
+	unready := []api.PodCondition{{
+		Kind:   api.PodReady,
+		Status: api.ConditionNone,
+	}}
+	tests := []struct {
+		spec     *api.PodSpec
+		info     api.PodInfo
+		expected []api.PodCondition
+	}{
+		{
+			spec:     nil,
+			info:     nil,
+			expected: unready,
+		},
+		{
+			spec:     &api.PodSpec{},
+			info:     api.PodInfo{},
+			expected: ready,
+		},
+		{
+			spec: &api.PodSpec{
+				Containers: []api.Container{
+					{Name: "1234"},
+				},
+			},
+			info:     api.PodInfo{},
+			expected: unready,
+		},
+		{
+			spec: &api.PodSpec{
+				Containers: []api.Container{
+					{Name: "1234"},
+				},
+			},
+			info: api.PodInfo{
+				"1234": api.ContainerStatus{Ready: true},
+			},
+			expected: ready,
+		},
+		{
+			spec: &api.PodSpec{
+				Containers: []api.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			info: api.PodInfo{
+				"1234": api.ContainerStatus{Ready: true},
+				"5678": api.ContainerStatus{Ready: true},
+			},
+			expected: ready,
+		},
+		{
+			spec: &api.PodSpec{
+				Containers: []api.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			info: api.PodInfo{
+				"1234": api.ContainerStatus{Ready: true},
+			},
+			expected: unready,
+		},
+		{
+			spec: &api.PodSpec{
+				Containers: []api.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			info: api.PodInfo{
+				"1234": api.ContainerStatus{Ready: true},
+				"5678": api.ContainerStatus{Ready: false},
+			},
+			expected: unready,
+		},
+	}
+
+	for i, test := range tests {
+		condition := getPodReadyCondition(test.spec, test.info)
+		if !reflect.DeepEqual(condition, test.expected) {
+			t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition)
+		}
+	}
+}
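The table above fully pins down the behavior of getPodReadyCondition, whose implementation is not itself among the hunks shown here. A hypothetical reconstruction consistent with every case in the table (a sketch, not the committed code):

	func getPodReadyCondition(spec *api.PodSpec, info api.PodInfo) []api.PodCondition {
		ready := []api.PodCondition{{Kind: api.PodReady, Status: api.ConditionFull}}
		unready := []api.PodCondition{{Kind: api.PodReady, Status: api.ConditionNone}}
		if spec == nil {
			return unready // no spec to judge against: report not ready
		}
		for _, container := range spec.Containers {
			if status, ok := info[container.Name]; !ok || !status.Ready {
				return unready // any missing or unready container makes the pod unready
			}
		}
		return ready // vacuously ready for a spec with no containers
	}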
@@ -19,6 +19,8 @@ package kubelet
 import (
 	"fmt"
 	"strconv"
+	"strings"
+	"sync"
 	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -30,25 +32,54 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
+
+	"github.com/fsouza/go-dockerclient"
 	"github.com/golang/glog"
 )
 
-var (
-	execprober = execprobe.New()
-	httprober  = httprobe.New()
-	tcprober   = tcprobe.New()
+const (
+	defaultProbeTimeout = 1 * time.Second
+	maxProbeRetries     = 3
 )
 
-func (kl *Kubelet) probeContainer(p *api.Probe, podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (probe.Result, error) {
+// probeContainer executes the given probe on a container and returns the result.
+// If the probe is nil this returns Success. If the probe's initial delay has not passed
+// since the creation of the container, this returns the defaultResult. It will then attempt
+// to execute the probe repeatedly up to maxProbeRetries times, and return on the first
+// successful result, else returning the last unsuccessful result and error.
+func (kl *Kubelet) probeContainer(p *api.Probe,
+	podFullName string,
+	podUID types.UID,
+	status api.PodStatus,
+	container api.Container,
+	dockerContainer *docker.APIContainers,
+	defaultResult probe.Result) (probe.Result, error) {
+	var err error
+	result := probe.Unknown
+	if p == nil {
+		return probe.Success, nil
+	}
+	if time.Now().Unix()-dockerContainer.Created < p.InitialDelaySeconds {
+		return defaultResult, nil
+	}
+	for i := 0; i < maxProbeRetries; i++ {
+		result, err = kl.runProbe(p, podFullName, podUID, status, container)
+		if result == probe.Success {
+			return result, err
+		}
+	}
+	return result, err
+}
+
+func (kl *Kubelet) runProbe(p *api.Probe, podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (probe.Result, error) {
 	var timeout time.Duration
-	secs := container.LivenessProbe.TimeoutSeconds
+	secs := p.TimeoutSeconds
 	if secs > 0 {
 		timeout = time.Duration(secs) * time.Second
 	} else {
-		timeout = 1 * time.Second
+		timeout = defaultProbeTimeout
 	}
 	if p.Exec != nil {
-		return execprober.Probe(kl.newExecInContainer(podFullName, podUID, container))
+		return kl.prober.exec.Probe(kl.newExecInContainer(podFullName, podUID, container))
 	}
 	if p.HTTPGet != nil {
 		port, err := extractPort(p.HTTPGet.Port, container)
@@ -56,14 +87,14 @@ func (kl *Kubelet) probeContainer(p *api.Probe, podFullName string, podUID types
 			return probe.Unknown, err
 		}
 		host, port, path := extractGetParams(p.HTTPGet, status, port)
-		return httprober.Probe(host, port, path, timeout)
+		return kl.prober.http.Probe(host, port, path, timeout)
 	}
 	if p.TCPSocket != nil {
 		port, err := extractPort(p.TCPSocket.Port, container)
 		if err != nil {
 			return probe.Unknown, err
 		}
-		return tcprober.Probe(status.PodIP, port, timeout)
+		return kl.prober.tcp.Probe(status.PodIP, port, timeout)
 	}
 	glog.Warningf("Failed to find probe builder for %s %+v", container.Name, container.LivenessProbe)
 	return probe.Unknown, nil
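Passing defaultResult lets one generalized prober serve both probe types: inside the initial delay a liveness check should default to Success (do not restart a container that has not had time to start), while a readiness check should default to Failure (do not route traffic to it yet). A hypothetical caller-side sketch of that split, using only names introduced in this diff (the real call sites live in parts of kubelet.go not shown here):

	// Hypothetical wiring, not the committed code.
	func (kl *Kubelet) probeLivenessAndReadiness(pod string, uid types.UID,
		status api.PodStatus, c api.Container, dc *docker.APIContainers) (probe.Result, error) {
		// Readiness defaults to Failure during the initial delay, so the pod
		// is withheld from service endpoints until the probe first succeeds.
		ready, _ := kl.probeContainer(c.ReadinessProbe, pod, uid, status, c, dc, probe.Failure)
		kl.readiness.set(dc.ID, ready == probe.Success)
		// Liveness defaults to Success during the initial delay, so a slow
		// starter is not killed before it has a chance to come up.
		return kl.probeContainer(c.LivenessProbe, pod, uid, status, c, dc, probe.Success)
	}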
@@ -132,3 +163,55 @@ func (eic execInContainer) CombinedOutput() ([]byte, error) {
 func (eic execInContainer) SetDir(dir string) {
 	//unimplemented
 }
+
+// This will eventually maintain info about probe results over time
+// to allow for implementation of health thresholds
+func newReadinessStates() *readinessStates {
+	return &readinessStates{states: make(map[string]bool)}
+}
+
+type readinessStates struct {
+	sync.Mutex
+	states map[string]bool
+}
+
+func (r *readinessStates) IsReady(c api.ContainerStatus) bool {
+	if c.State.Running == nil {
+		return false
+	}
+	return r.get(strings.TrimPrefix(c.ContainerID, "docker://"))
+}
+
+func (r *readinessStates) get(key string) bool {
+	r.Lock()
+	defer r.Unlock()
+	state, found := r.states[key]
+	return state && found
+}
+
+func (r *readinessStates) set(key string, value bool) {
+	r.Lock()
+	defer r.Unlock()
+	r.states[key] = value
+}
+
+func (r *readinessStates) remove(key string) {
+	r.Lock()
+	defer r.Unlock()
+	delete(r.states, key)
+}
+
+func newProbeHolder() probeHolder {
+	return probeHolder{
+		exec: execprobe.New(),
+		http: httprobe.New(),
+		tcp:  tcprobe.New(),
+	}
+}
+
+type probeHolder struct {
+	exec execprobe.ExecProber
+	http httprobe.HTTPProber
+	tcp  tcprobe.TCPProber
+}
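readinessStates embeds sync.Mutex because probe workers and status queries touch the map from different goroutines. A minimal standalone sketch of the same pattern (simplified types, illustrative only):

	package main

	import (
		"fmt"
		"sync"
	)

	type states struct {
		sync.Mutex
		m map[string]bool
	}

	func (s *states) set(k string, v bool) {
		s.Lock()
		defer s.Unlock()
		s.m[k] = v
	}

	func (s *states) get(k string) bool {
		s.Lock()
		defer s.Unlock()
		return s.m[k] // a missing key yields the zero value, false
	}

	func main() {
		s := &states{m: make(map[string]bool)}
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ { // e.g. one probe worker per container
			wg.Add(1)
			go func(id string) {
				defer wg.Done()
				s.set(id, true)
			}(fmt.Sprintf("ctr-%d", i))
		}
		wg.Wait()
		fmt.Println(s.get("ctr-0"), s.get("missing")) // true false
	}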
@@ -17,10 +17,17 @@ limitations under the License.
 package kubelet
 
 import (
+	"errors"
 	"testing"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
+
+	"github.com/fsouza/go-dockerclient"
 )
 
 func TestFindPortByName(t *testing.T) {
@@ -128,3 +135,110 @@ func TestGetTCPAddrParts(t *testing.T) {
 		}
 	}
 }
+
+type fakeExecProber struct {
+	result probe.Result
+	err    error
+}
+
+func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, error) {
+	return p.result, p.err
+}
+
+func makeTestKubelet(result probe.Result, err error) *Kubelet {
+	return &Kubelet{
+		prober: probeHolder{
+			exec: fakeExecProber{
+				result: result,
+				err:    err,
+			},
+		},
+	}
+}
+
+func TestProbeContainer(t *testing.T) {
+	dc := &docker.APIContainers{Created: time.Now().Unix()}
+	tests := []struct {
+		p              *api.Probe
+		defaultResult  probe.Result
+		expectError    bool
+		expectedResult probe.Result
+	}{
+		// No probe: probeContainer returns Success regardless of the default.
+		{
+			defaultResult:  probe.Success,
+			expectedResult: probe.Success,
+		},
+		{
+			defaultResult:  probe.Failure,
+			expectedResult: probe.Success,
+		},
+		// Probe still within its initial delay: the default result is returned.
+		{
+			p:              &api.Probe{InitialDelaySeconds: 100},
+			defaultResult:  probe.Failure,
+			expectError:    false,
+			expectedResult: probe.Failure,
+		},
+		// Delay elapsed but no handler set: runProbe reports Unknown.
+		{
+			p: &api.Probe{
+				InitialDelaySeconds: -100,
+			},
+			defaultResult:  probe.Failure,
+			expectError:    false,
+			expectedResult: probe.Unknown,
+		},
+		// Exec handler routed to the fake prober.
+		{
+			p: &api.Probe{
+				InitialDelaySeconds: -100,
+				Handler: api.Handler{
+					Exec: &api.ExecAction{},
+				},
+			},
+			defaultResult:  probe.Failure,
+			expectError:    false,
+			expectedResult: probe.Success,
+		},
+		{
+			p: &api.Probe{
+				InitialDelaySeconds: -100,
+				Handler: api.Handler{
+					Exec: &api.ExecAction{},
+				},
+			},
+			defaultResult:  probe.Failure,
+			expectError:    true,
+			expectedResult: probe.Unknown,
+		},
+		{
+			p: &api.Probe{
+				InitialDelaySeconds: -100,
+				Handler: api.Handler{
+					Exec: &api.ExecAction{},
+				},
+			},
+			defaultResult:  probe.Success,
+			expectError:    false,
+			expectedResult: probe.Failure,
+		},
+	}
+
+	for _, test := range tests {
+		var kl *Kubelet
+		if test.expectError {
+			kl = makeTestKubelet(test.expectedResult, errors.New("error"))
+		} else {
+			kl = makeTestKubelet(test.expectedResult, nil)
+		}
+		result, err := kl.probeContainer(test.p, "", types.UID(""), api.PodStatus{}, api.Container{}, dc, test.defaultResult)
+		if test.expectError && err == nil {
+			t.Error("Expected error but no error was returned.")
+		}
+		if !test.expectError && err != nil {
+			t.Errorf("Expected no error but got: %v", err)
+		}
+		if test.expectedResult != result {
+			t.Errorf("Expected result was %v but probeContainer() returned %v", test.expectedResult, result)
+		}
+	}
+}
@@ -162,6 +162,7 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
 	if pod.Status.Host == "" {
 		// Not assigned.
 		newStatus.Phase = api.PodPending
+		newStatus.Conditions = append(newStatus.Conditions, pod.Status.Conditions...)
 		return newStatus, nil
 	}
@@ -171,6 +172,7 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
 	if err != nil || len(nodeStatus.Conditions) == 0 {
 		glog.V(5).Infof("node doesn't exist: %v %v, setting pod status to unknown", err, nodeStatus)
 		newStatus.Phase = api.PodUnknown
+		newStatus.Conditions = append(newStatus.Conditions, pod.Status.Conditions...)
 		return newStatus, nil
 	}
@@ -179,6 +181,7 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
 		if (condition.Kind == api.NodeReady || condition.Kind == api.NodeReachable) && condition.Status == api.ConditionNone {
 			glog.V(5).Infof("node status: %v, setting pod status to unknown", condition)
 			newStatus.Phase = api.PodUnknown
+			newStatus.Conditions = append(newStatus.Conditions, pod.Status.Conditions...)
 			return newStatus, nil
 		}
 	}
@@ -189,6 +192,7 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
 	if err != nil {
 		glog.Errorf("error getting pod status: %v, setting status to unknown", err)
 		newStatus.Phase = api.PodUnknown
+		newStatus.Conditions = append(newStatus.Conditions, pod.Status.Conditions...)
 	} else {
 		newStatus.Info = result.Status.Info
 		newStatus.PodIP = result.Status.PodIP
@@ -197,8 +201,10 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
 			// populated the status yet. This should go away once
 			// we remove boundPods
 			newStatus.Phase = api.PodPending
+			newStatus.Conditions = append(newStatus.Conditions, pod.Status.Conditions...)
 		} else {
 			newStatus.Phase = result.Status.Phase
+			newStatus.Conditions = result.Status.Conditions
 		}
 	}
 	return newStatus, err
@@ -28,12 +28,16 @@ import (
 const defaultHealthyOutput = "ok"
 
 func New() ExecProber {
-	return ExecProber{}
+	return execProber{}
 }
 
-type ExecProber struct{}
+type ExecProber interface {
+	Probe(e uexec.Cmd) (probe.Result, error)
+}
+
+type execProber struct{}
 
-func (pr ExecProber) Probe(e uexec.Cmd) (probe.Result, error) {
+func (pr execProber) Probe(e uexec.Cmd) (probe.Result, error) {
 	data, err := e.CombinedOutput()
 	glog.V(4).Infof("health check response: %s", string(data))
 	if err != nil {
@@ -30,15 +30,19 @@ import (
 
 func New() HTTPProber {
 	transport := &http.Transport{}
-	return HTTPProber{transport}
+	return httpProber{transport}
 }
 
-type HTTPProber struct {
+type HTTPProber interface {
+	Probe(host string, port int, path string, timeout time.Duration) (probe.Result, error)
+}
+
+type httpProber struct {
 	transport *http.Transport
 }
 
-// Probe returns a ProbeRunner capable of running an http check.
-func (pr *HTTPProber) Probe(host string, port int, path string, timeout time.Duration) (probe.Result, error) {
+// Probe runs an HTTP check against host:port/path with the given timeout.
+func (pr httpProber) Probe(host string, port int, path string, timeout time.Duration) (probe.Result, error) {
 	return DoHTTPProbe(formatURL(host, port, path), &http.Client{Timeout: timeout, Transport: pr.transport})
 }
@@ -27,12 +27,16 @@ import (
 )
 
 func New() TCPProber {
-	return TCPProber{}
+	return tcpProber{}
 }
 
-type TCPProber struct{}
+type TCPProber interface {
+	Probe(host string, port int, timeout time.Duration) (probe.Result, error)
+}
+
+type tcpProber struct{}
 
-func (pr TCPProber) Probe(host string, port int, timeout time.Duration) (probe.Result, error) {
+func (pr tcpProber) Probe(host string, port int, timeout time.Duration) (probe.Result, error) {
 	return DoTCPProbe(net.JoinHostPort(host, strconv.Itoa(port)), timeout)
 }
@@ -76,6 +76,19 @@ func (e *EndpointController) SyncServiceEndpoints() error {
 			glog.Errorf("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
 			continue
 		}
+
+		inService := false
+		for _, c := range pod.Status.Conditions {
+			if c.Kind == api.PodReady && c.Status == api.ConditionFull {
+				inService = true
+				break
+			}
+		}
+		if !inService {
+			glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
+			continue
+		}
+
 		endpoints = append(endpoints, net.JoinHostPort(pod.Status.PodIP, strconv.Itoa(port)))
 	}
 	currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
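This is the user-visible payoff of the change: a pod is listed as a service endpoint only while it carries a PodReady condition with status ConditionFull. The same check could be factored into a small helper; a hypothetical sketch, not part of the commit:

	// podInService reports whether the pod's PodReady condition is Full.
	func podInService(pod *api.Pod) bool {
		for _, c := range pod.Status.Conditions {
			if c.Kind == api.PodReady && c.Status == api.ConditionFull {
				return true
			}
		}
		return false
	}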
@@ -49,6 +49,12 @@ func newPodList(count int) *api.PodList {
 		},
 		Status: api.PodStatus{
 			PodIP: "1.2.3.4",
+			Conditions: []api.PodCondition{
+				{
+					Kind:   api.PodReady,
+					Status: api.ConditionFull,
+				},
+			},
 		},
 	})
 }