Sync static pods from Kubelet to the API server

Currently, the API server is not aware of static pods at all (i.e.
pods whose manifests come from sources other than the API server,
such as files or HTTP endpoints). This is inconvenient because users
cannot inspect static pods through kubectl. It is also sub-optimal
because the scheduler is unaware of the resources these static pods
consume on the node.

This change syncs the information back to the API server by creating
a mirror pod for each static pod.
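
In rough outline, the mechanism looks like the sketch below. All of
the names here (Pod, APIClient, syncMirrorPod) are hypothetical
stand-ins for illustration, not the kubelet's actual types or
functions:

```go
package sketch

// Pod stands in for api.Pod; APIClient stands in for the kubelet's
// client to the API server. Both are hypothetical.
type Pod struct {
	Name string
	UID  string
}

type APIClient interface {
	CreatePod(p *Pod) error
	DeletePod(name string) error
}

// syncMirrorPod creates a mirror pod for a static pod unless one
// already exists. The API server assigns the mirror pod its own UID,
// so the mirror's UID never matches the static pod's.
func syncMirrorPod(c APIClient, static *Pod, mirrorExists bool) error {
	if mirrorExists {
		return nil
	}
	mirror := *static
	mirror.UID = "" // left empty; the API server fills in a fresh UID
	return c.CreatePod(&mirror)
}
```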

 - Kubelet creates containers for the static pod, as it normally
   would.

 - If a mirror pod gets deleted, Kubelet will re-create it. The
   containers are sync'd to the static pod, so they will not be
   affected (see the sketch after this list).

 - If a static pod gets removed from its source (e.g. the manifest
   file is deleted from the directory), the orphaned mirror pod will
   be deleted.
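
Taken together, the last two behaviors amount to a reconciliation
pass. Continuing the hypothetical sketch from above (same stand-in
types, not the real implementation):

```go
// reconcileMirrorPods re-creates missing mirror pods and deletes
// orphaned ones, keyed by full pod name.
func reconcileMirrorPods(c APIClient, staticPods, mirrorPods map[string]*Pod) error {
	// Re-create mirror pods that were deleted behind the kubelet's back.
	for name, static := range staticPods {
		if _, ok := mirrorPods[name]; !ok {
			if err := syncMirrorPod(c, static, false); err != nil {
				return err
			}
		}
	}
	// Delete mirror pods whose static pod is gone from the source.
	for name := range mirrorPods {
		if _, ok := staticPods[name]; !ok {
			if err := c.DeletePod(name); err != nil {
				return err
			}
		}
	}
	return nil
}
```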

Note that because events are associated with a pod's UID, and a
mirror pod has a different UID than the original static pod, the
static pod's events will not be shown when running `kubectl describe
pod <mirror_pod>`.

Author: Yu-Ju Hong
Date:   2015-03-09 15:46:47 -07:00
Commit: 929fb63b33 (parent: 2c97522692)
14 changed files with 554 additions and 79 deletions


@@ -28,7 +28,7 @@ import (
 	"github.com/golang/glog"
 )
 
-type syncPodFnType func(*api.Pod, dockertools.DockerContainers) error
+type syncPodFnType func(*api.Pod, bool, dockertools.DockerContainers) error
 
 type podWorkers struct {
 	// Protects podUpdates field.
@@ -60,11 +60,15 @@ type workUpdate struct {
 	// The pod state to reflect.
 	pod *api.Pod
 
+	// Whether there exists a mirror pod for pod.
+	hasMirrorPod bool
+
 	// Function to call when the update is complete.
 	updateCompleteFn func()
 }
 
-func newPodWorkers(dockerCache dockertools.DockerCache, syncPodFn syncPodFnType, recorder record.EventRecorder) *podWorkers {
+func newPodWorkers(dockerCache dockertools.DockerCache, syncPodFn syncPodFnType,
+	recorder record.EventRecorder) *podWorkers {
 	return &podWorkers{
 		podUpdates: map[types.UID]chan workUpdate{},
 		isWorking:  map[types.UID]bool{},
@@ -92,7 +96,8 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
 			return
 		}
 
-		err = p.syncPodFn(newWork.pod, containers.FindContainersByPod(newWork.pod.UID, GetPodFullName(newWork.pod)))
+		err = p.syncPodFn(newWork.pod, newWork.hasMirrorPod,
+			containers.FindContainersByPod(newWork.pod.UID, GetPodFullName(newWork.pod)))
 		if err != nil {
 			glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
 			p.recorder.Eventf(newWork.pod, "failedSync", "Error syncing pod, skipping: %v", err)
@@ -106,7 +111,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
 }
 
 // Apply the new setting to the specified pod. updateComplete is called when the update is completed.
-func (p *podWorkers) UpdatePod(pod *api.Pod, updateComplete func()) {
+func (p *podWorkers) UpdatePod(pod *api.Pod, hasMirrorPod bool, updateComplete func()) {
 	uid := pod.UID
 	var podUpdates chan workUpdate
 	var exists bool
@@ -129,11 +134,13 @@ func (p *podWorkers) UpdatePod(pod *api.Pod, updateComplete func()) {
 		p.isWorking[pod.UID] = true
 		podUpdates <- workUpdate{
 			pod:              pod,
+			hasMirrorPod:     hasMirrorPod,
 			updateCompleteFn: updateComplete,
 		}
 	} else {
 		p.lastUndeliveredWorkUpdate[pod.UID] = workUpdate{
 			pod:              pod,
+			hasMirrorPod:     hasMirrorPod,
 			updateCompleteFn: updateComplete,
 		}
 	}
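
For orientation, the pattern UpdatePod implements above can be
reduced to the sketch below: if the pod's worker is idle, the update
goes to its channel; if it is busy, the update is parked, overwriting
any older parked update, so the worker only ever sees the most recent
state. The names and types are simplified stand-ins, and the real
worker also replays the parked update once it finishes:

```go
package sketch

import "sync"

// workUpdate and podWorkers are trimmed-down stand-ins for the types
// in the diff above.
type workUpdate struct {
	hasMirrorPod bool
}

type podWorkers struct {
	mu              sync.Mutex
	isWorking       map[string]bool
	podUpdates      map[string]chan workUpdate
	lastUndelivered map[string]workUpdate
}

func newSketchWorkers() *podWorkers {
	return &podWorkers{
		isWorking:       map[string]bool{},
		podUpdates:      map[string]chan workUpdate{},
		lastUndelivered: map[string]workUpdate{},
	}
}

// UpdatePod delivers the update if the pod's worker is idle and parks
// it otherwise.
func (p *podWorkers) UpdatePod(uid string, hasMirrorPod bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	ch, ok := p.podUpdates[uid]
	if !ok {
		ch = make(chan workUpdate, 1) // buffered so the send cannot block
		p.podUpdates[uid] = ch
	}
	u := workUpdate{hasMirrorPod: hasMirrorPod}
	if !p.isWorking[uid] {
		p.isWorking[uid] = true
		ch <- u
	} else {
		p.lastUndelivered[uid] = u // coalesce: keep only the newest
	}
}
```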