Adding sync pod latency metric.
Latency is broken down by operation type: create, update, and sync. Part of #4604.
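
The hunks below only add the completion-callback plumbing to podWorkers; the latency metric itself would be recorded by whoever calls UpdatePod. As a rough caller-side sketch (not code from this commit; dispatchWork, recordSyncLatency, and the operation label are illustrative names), the callback is what lets the caller time each update:

package kubelet

import (
    "time"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/golang/glog"
)

// dispatchWork is an illustrative caller: start a timer when the update is
// handed to the pod worker and stop it in the completion callback.
func dispatchWork(workers *podWorkers, pod *api.BoundPod, operation string) {
    start := time.Now()
    workers.UpdatePod(pod, func() {
        // Invoked by the worker once this update has been processed.
        recordSyncLatency(operation, time.Since(start))
    })
}

// recordSyncLatency stands in for the real metrics sink, e.g. a histogram
// labeled by operation ("create", "update", "sync").
func recordSyncLatency(operation string, d time.Duration) {
    glog.Infof("syncPod %s took %v", operation, d)
}
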
@@ -35,7 +35,7 @@ type podWorkers struct {
     // Tracks all running per-pod goroutines - per-pod goroutine will be
     // processing updates received through its corresponding channel.
-    podUpdates map[types.UID]chan api.BoundPod
+    podUpdates map[types.UID]chan workUpdate
     // DockerCache is used for listing running containers.
     dockerCache dockertools.DockerCache
 
@@ -45,16 +45,24 @@ type podWorkers struct {
     syncPodFun syncPodFunType
 }
 
+type workUpdate struct {
+    // The pod state to reflect.
+    pod *api.BoundPod
+
+    // Function to call when the update is complete.
+    updateCompleteFun func()
+}
+
 func newPodWorkers(dockerCache dockertools.DockerCache, syncPodFun syncPodFunType) *podWorkers {
     return &podWorkers{
-        podUpdates:  map[types.UID]chan api.BoundPod{},
+        podUpdates:  map[types.UID]chan workUpdate{},
         dockerCache: dockerCache,
         syncPodFun:  syncPodFun,
     }
 }
 
-func (p *podWorkers) managePodLoop(podUpdates <-chan api.BoundPod) {
-    for newPod := range podUpdates {
+func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
+    for newWork := range podUpdates {
         // Since we use docker cache, getting current state shouldn't cause
         // performance overhead on Docker. Moreover, as long as we run syncPod
         // no matter if it changes anything, having an old version of "containers"
@@ -64,29 +72,33 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan api.BoundPod) {
             glog.Errorf("Error listing containers while syncing pod: %v", err)
             continue
         }
-        err = p.syncPodFun(&newPod, containers)
+        err = p.syncPodFun(newWork.pod, containers)
         if err != nil {
-            glog.Errorf("Error syncing pod %s, skipping: %v", newPod.UID, err)
-            record.Eventf(&newPod, "failedSync", "Error syncing pod, skipping: %v", err)
+            glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
+            record.Eventf(newWork.pod, "failedSync", "Error syncing pod, skipping: %v", err)
             continue
         }
     }
 }
 
-func (p *podWorkers) UpdatePod(pod api.BoundPod) {
+// Apply the new setting to the specified pod. updateComplete is called when the update is completed.
+func (p *podWorkers) UpdatePod(pod *api.BoundPod, updateComplete func()) {
     uid := pod.UID
-    var podUpdates chan api.BoundPod
+    var podUpdates chan workUpdate
     var exists bool
 
     p.podLock.Lock()
     defer p.podLock.Unlock()
     if podUpdates, exists = p.podUpdates[uid]; !exists {
         // TODO(wojtek-t): Adjust the size of the buffer in this channel
-        podUpdates = make(chan api.BoundPod, 5)
+        podUpdates = make(chan workUpdate, 5)
         p.podUpdates[uid] = podUpdates
         go p.managePodLoop(podUpdates)
     }
-    podUpdates <- pod
+    podUpdates <- workUpdate{
+        pod:               pod,
+        updateCompleteFun: updateComplete,
+    }
 }
 
 func (p *podWorkers) ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) {
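
The hunks above do not show where updateCompleteFun is actually invoked; presumably the worker calls it after syncPodFun succeeds, and the error path's continue skips it. Under that assumption, a caller that needs to block until its update has been handled could use the callback like this (illustrative only, same package as the sketch above):

// updatePodAndWait is a hypothetical helper: block until the pod worker has
// processed this update, assuming the worker invokes the completion callback
// after a successful sync (the invocation site is not shown in the diff).
func updatePodAndWait(workers *podWorkers, pod *api.BoundPod) {
    done := make(chan struct{})
    workers.UpdatePod(pod, func() { close(done) })
    // A failed sync skips the callback (the loop continues), so real callers
    // would add a timeout instead of waiting indefinitely.
    <-done
}
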