Merge pull request #8069 from yujuhong/runcontainer

Kubelet: minor cleanups
Victor Marmol 2015-05-11 12:34:33 -07:00
commit 65cf37ab6d
5 changed files with 15 additions and 15 deletions

View File

@@ -55,6 +55,6 @@ func toRuntimeImage(image *docker.APIImages) (*kubecontainer.Image, error) {
 	return &kubecontainer.Image{
 		ID:   image.ID,
 		Tags: image.RepoTags,
-		Size: image.Size,
+		Size: image.VirtualSize,
 	}, nil
 }
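For context on the fix above: go-dockerclient's docker.APIImages carries both Size and VirtualSize, and VirtualSize is, roughly, the top layer plus all parent layers, which matches what `docker images` reports as an image's effective size. A minimal sketch of the conversion pattern, using stand-in types rather than the real go-dockerclient and kubelet definitions:

package main

import "fmt"

// Stand-ins for docker.APIImages and kubecontainer.Image (illustrative,
// not the real definitions).
type apiImage struct {
	ID          string
	RepoTags    []string
	Size        int64 // top layer only
	VirtualSize int64 // top layer plus all parent layers
}

type runtimeImage struct {
	ID   string
	Tags []string
	Size int64
}

// toRuntimeImage mirrors the fix above: report VirtualSize as the
// image's size rather than the top-layer-only Size.
func toRuntimeImage(img *apiImage) runtimeImage {
	return runtimeImage{ID: img.ID, Tags: img.RepoTags, Size: img.VirtualSize}
}

func main() {
	img := apiImage{ID: "aeeea", RepoTags: []string{"abc"}, Size: 10, VirtualSize: 1234}
	fmt.Println(toRuntimeImage(&img).Size) // prints 1234, not 10
}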

View File

@@ -51,9 +51,9 @@ func TestToRuntimeContainer(t *testing.T) {
 func TestToRuntimeImage(t *testing.T) {
 	original := &docker.APIImages{
-		ID:       "aeeea",
-		RepoTags: []string{"abc", "def"},
-		Size:     1234,
+		ID:          "aeeea",
+		RepoTags:    []string{"abc", "def"},
+		VirtualSize: 1234,
 	}
 	expected := &kubecontainer.Image{
 		ID:   "aeeea",

View File

@@ -1067,9 +1067,8 @@ func (dm *DockerManager) killContainer(containerID types.UID) error {
 	return err
 }
 
-// TODO(vmarmol): Unexport this as it is no longer used externally.
 // Run a single container from a pod. Returns the docker container ID
-func (dm *DockerManager) RunContainer(pod *api.Pod, container *api.Container, netMode, ipcMode string) (kubeletTypes.DockerID, error) {
+func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode string) (kubeletTypes.DockerID, error) {
 	ref, err := kubecontainer.GenerateContainerRef(pod, container)
 	if err != nil {
 		glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
@@ -1157,7 +1156,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.Doc
 		dm.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", container.Image)
 	}
 
-	id, err := dm.RunContainer(pod, container, netNamespace, "")
+	id, err := dm.runContainerInPod(pod, container, netNamespace, "")
 	if err != nil {
 		return "", err
 	}
@@ -1405,7 +1404,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
 			// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container
 			namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
-			_, err = dm.RunContainer(pod, container, namespaceMode, namespaceMode)
+			_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode)
 			dm.updateReasonCache(pod, container, err)
 			if err != nil {
 				// TODO(bburns) : Perhaps blacklist a container after N failures?
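Background on the namespaceMode string above: Docker interprets a network or IPC mode of the form container:<id> as "join that container's namespace" rather than creating a fresh one, so passing it as both netMode and ipcMode makes the restarted container share the pod infra container's network and IPC namespaces. A hedged sketch of how such a mode string typically reaches Docker through go-dockerclient (the helper is hypothetical; HostConfig with its NetworkMode and IpcMode fields is go-dockerclient's):

package sketch

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// sharedNamespaceHostConfig is a hypothetical helper, not kubelet code:
// it builds a HostConfig that joins an existing container's network and
// IPC namespaces using Docker's "container:<id>" mode.
func sharedNamespaceHostConfig(infraContainerID string) *docker.HostConfig {
	mode := fmt.Sprintf("container:%v", infraContainerID)
	return &docker.HostConfig{
		NetworkMode: mode, // join the infra container's network namespace
		IpcMode:     mode, // join its IPC namespace as well
	}
}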

View File

@@ -1173,7 +1173,7 @@ func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metri
 	}
 
 	// Note that we just killed the unwanted pods. This may not have reflected
-	// in the cache. We need to bypass the cach to get the latest set of
+	// in the cache. We need to bypass the cache to get the latest set of
 	// running pods to clean up the volumes.
 	// TODO: Evaluate the performance impact of bypassing the runtime cache.
 	runningPods, err = kl.containerRuntime.GetPods(false)
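Besides the typo fix ("cach" to "cache"), the comment describes a deliberate trade: right after killing pods, the runtime cache can still list them, so SyncPods queries the runtime directly instead of reading the cache, paying an extra runtime round-trip for an accurate view (the TODO tracks that cost). A toy sketch of the cached-versus-direct read, using stand-in types and assuming GetPods(false) means "running pods only":

package sketch

// Toy illustration, not kubelet code.
type containerRuntime interface {
	// GetPods(false) is assumed to return only running pods.
	GetPods(all bool) ([]string, error)
}

type podView struct {
	cache []string         // possibly stale snapshot
	rt    containerRuntime // authoritative source
}

// cachedPods is cheap but may still include pods killed a moment ago.
func (v *podView) cachedPods() []string { return v.cache }

// freshRunningPods bypasses the cache, as SyncPods does above.
func (v *podView) freshRunningPods() ([]string, error) {
	return v.rt.GetPods(false)
}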
@@ -1696,7 +1696,8 @@ func getPodReadyCondition(spec *api.PodSpec, statuses []api.ContainerStatus) []a
 	return ready
 }
 
-// GetPodStatus returns information from Docker about the containers in a pod
+// GetPodStatus returns information of the containers in the pod from the
+// container runtime.
 func (kl *Kubelet) GetPodStatus(podFullName string) (api.PodStatus, error) {
 	// Check to see if we have a cached version of the status.
 	cachedPodStatus, found := kl.statusManager.GetPodStatus(podFullName)
@@ -1722,14 +1723,14 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 	if err != nil {
 		// Error handling
-		glog.Infof("Query docker container info for pod %q failed with error (%v)", podFullName, err)
+		glog.Infof("Query container info for pod %q failed with error (%v)", podFullName, err)
 		if strings.Contains(err.Error(), "resource temporarily unavailable") {
 			// Leave upstream layer to decide what to do
 			return api.PodStatus{}, err
 		} else {
 			pendingStatus := api.PodStatus{
 				Phase:   api.PodPending,
-				Message: fmt.Sprintf("Query docker container info failed with error (%v)", err),
+				Message: fmt.Sprintf("Query container info failed with error (%v)", err),
 			}
 			return pendingStatus, nil
 		}
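The error handling above is a classify-or-degrade pattern: a transient "resource temporarily unavailable" (EAGAIN) error is returned unchanged so the caller can retry, while any other query failure is absorbed into a Pending status whose Message carries the error. A self-contained sketch of the same shape, with stand-in status types rather than the kubelet's api package:

package main

import (
	"errors"
	"fmt"
	"strings"
)

type podPhase string

const podPending podPhase = "Pending"

type podStatus struct {
	Phase   podPhase
	Message string
}

// statusForQueryError mirrors the shape of the handling above: transient
// errors bubble up for retry; everything else degrades to Pending.
func statusForQueryError(err error) (podStatus, error) {
	if strings.Contains(err.Error(), "resource temporarily unavailable") {
		return podStatus{}, err // let the upstream layer decide
	}
	return podStatus{
		Phase:   podPending,
		Message: fmt.Sprintf("Query container info failed with error (%v)", err),
	}, nil
}

func main() {
	st, err := statusForQueryError(errors.New("connection refused"))
	fmt.Println(st.Phase, err) // Pending <nil>
}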

View File

@@ -84,10 +84,10 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
 	for newWork := range podUpdates {
 		func() {
 			defer p.checkForUpdates(newWork.pod.UID, newWork.updateCompleteFn)
-			// We would like to have the state of Docker from at least the moment
-			// when we finished the previous processing of that pod.
+			// We would like to have the state of the containers from at least
+			// the moment when we finished the previous processing of that pod.
 			if err := p.runtimeCache.ForceUpdateIfOlder(minRuntimeCacheTime); err != nil {
-				glog.Errorf("Error updating docker cache: %v", err)
+				glog.Errorf("Error updating the container runtime cache: %v", err)
 				return
 			}
 			pods, err := p.runtimeCache.GetPods()
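ForceUpdateIfOlder(minRuntimeCacheTime) is the worker's freshness guard: refresh the shared runtime cache only if its snapshot is older than the given time, so each pod worker acts on container state no older than its previous pass over that pod without forcing a runtime query on every update. A minimal sketch of that assumed contract (toy cache; the kubelet's real RuntimeCache may differ):

package sketch

import (
	"sync"
	"time"
)

// timedCache illustrates the assumed ForceUpdateIfOlder contract; it is
// not the kubelet's RuntimeCache implementation.
type timedCache struct {
	mu        sync.Mutex
	cacheTime time.Time
	pods      []string
	refresh   func() ([]string, error) // direct runtime query
}

// ForceUpdateIfOlder refreshes only when the snapshot predates t.
func (c *timedCache) ForceUpdateIfOlder(t time.Time) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.cacheTime.Before(t) {
		return nil // snapshot is recent enough; skip the runtime call
	}
	pods, err := c.refresh()
	if err != nil {
		return err
	}
	c.pods, c.cacheTime = pods, time.Now()
	return nil
}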