Start using reason cache in kubelet
@@ -441,6 +441,7 @@ func NewMainKubelet(
 		return nil, err
 	}
 	klet.runtimeCache = runtimeCache
+	klet.reasonCache = NewReasonCache()
 	klet.workQueue = queue.NewBasicWorkQueue()
 	klet.podWorkers = newPodWorkers(runtimeCache, klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache)
 
@@ -563,6 +564,10 @@ type Kubelet struct {
 	// Container runtime.
 	containerRuntime kubecontainer.Runtime
 
+	// reasonCache caches the failure reason of the last creation of all containers, which is
+	// used for generating ContainerStatus.
+	reasonCache *ReasonCache
+
 	// nodeStatusUpdateFrequency specifies how often kubelet posts node status to master.
 	// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
 	// in nodecontroller. There are several constraints:
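The ReasonCache implementation itself is not part of this diff. Going by the field comment above and the NewReasonCache() call in NewMainKubelet, a minimal sketch of the shape implied here might look like the following; the reasonItem type, the map-based storage, and the key format are assumptions for illustration, not the actual implementation:

package kubelet

import "sync"

// reasonItem pairs an error with a human-readable message
// (hypothetical field names; not taken from this diff).
type reasonItem struct {
	err     error
	message string
}

// ReasonCache stores the failure reason of the most recent creation attempt
// for each container, keyed by pod UID plus container name.
type ReasonCache struct {
	lock  sync.Mutex
	cache map[string]reasonItem
}

// NewReasonCache returns an empty ReasonCache.
func NewReasonCache() *ReasonCache {
	return &ReasonCache{cache: map[string]reasonItem{}}
}

// composeKey builds the cache key from a pod UID and container name.
func (c *ReasonCache) composeKey(uid, name string) string {
	return uid + "_" + name
}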
@@ -1676,8 +1681,8 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}
 
 	result := kl.containerRuntime.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, kl.backOff)
-	err = result.Error()
-	if err != nil {
+	kl.reasonCache.Update(pod.UID, result)
+	if err = result.Error(); err != nil {
 		return err
 	}
 
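Note that Update runs before the error check, so creation failures are recorded in the reasonCache even when SyncPod returns an error; the cached reason can then be surfaced later when generating ContainerStatus. Continuing the sketch above, Update could look like the following, assuming the sync result exposes per-container outcomes with an error and message (the result type here is a simplified, hypothetical stand-in for kubecontainer.PodSyncResult):

// syncResult is a simplified stand-in for the runtime's per-container
// sync result; the real result type in the kubelet is richer than this.
type syncResult struct {
	container string // target container name
	err       error  // nil if the operation succeeded
	message   string // human-readable detail for the error
}

// Update records the latest creation failure for each container in the
// results, and clears stale entries for containers that succeeded.
func (c *ReasonCache) Update(uid string, results []syncResult) {
	c.lock.Lock()
	defer c.lock.Unlock()
	for _, r := range results {
		key := c.composeKey(uid, r.container)
		if r.err != nil {
			c.cache[key] = reasonItem{err: r.err, message: r.message}
		} else {
			delete(c.cache, key)
		}
	}
}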
@@ -148,6 +148,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 
 	kubelet.containerRuntime = fakeRuntime
 	kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
+	kubelet.reasonCache = NewReasonCache()
 	kubelet.podWorkers = &fakePodWorkers{
 		syncPodFn:    kubelet.syncPod,
 		runtimeCache: kubelet.runtimeCache,
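The test fixture wires a real cache into the test kubelet so that syncPod can call Update. The read side is not shown in this diff; a hypothetical Get accessor illustrates how status generation could consume the cached reason:

// Get returns the cached failure reason for a container, if any
// (hypothetical accessor; not part of this diff).
func (c *ReasonCache) Get(uid, name string) (reasonItem, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	item, ok := c.cache[c.composeKey(uid, name)]
	return item, ok
}

With such an accessor, when the kubelet generates ContainerStatus for a container that failed to start, it can look up the pod UID and container name and report the cached reason and message rather than an empty status.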