daemonset: use contextual logging
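The controller now takes a logger from the context it is handed and passes it explicitly to its informer event handlers and helpers, instead of calling the global klog functions. Below is a minimal, hedged sketch of that pattern in isolation; handleAdd and the DaemonSet name are illustrative stand-ins and are not taken from the patch itself.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// handleAdd stands in for an informer callback such as addDaemonset: the
// logger arrives as an explicit argument instead of coming from klog globals.
func handleAdd(logger klog.Logger, name string) {
	// Structured key/value pairs replace format strings like
	// klog.V(4).Infof("Adding daemon set %s", name).
	logger.V(4).Info("Adding daemon set", "daemonset", name)
}

func main() {
	// The caller picks the logger and stores it in the context; inside the
	// controller, klog.FromContext(ctx) retrieves it (or a default).
	ctx := klog.NewContext(context.Background(), klog.Background())

	// The constructor extracts the logger once and captures it in the
	// event-handler closures it registers, as the hunks below show.
	logger := klog.FromContext(ctx)
	handleAdd(logger, "kube-proxy")
}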
@@ -130,6 +130,7 @@ type DaemonSetsController struct {
 
 // NewDaemonSetsController creates a new DaemonSetsController
 func NewDaemonSetsController(
+	ctx context.Context,
 	daemonSetInformer appsinformers.DaemonSetInformer,
 	historyInformer appsinformers.ControllerRevisionInformer,
 	podInformer coreinformers.PodInformer,
@@ -138,7 +139,7 @@ func NewDaemonSetsController(
 	failedPodsBackoff *flowcontrol.Backoff,
 ) (*DaemonSetsController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-
+	logger := klog.FromContext(ctx)
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
 		eventBroadcaster: eventBroadcaster,
@@ -156,17 +157,29 @@ func NewDaemonSetsController(
 	}
 
 	daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: dsc.addDaemonset,
-		UpdateFunc: dsc.updateDaemonset,
-		DeleteFunc: dsc.deleteDaemonset,
+		AddFunc: func(obj interface{}) {
+			dsc.addDaemonset(logger, obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			dsc.updateDaemonset(logger, oldObj, newObj)
+		},
+		DeleteFunc: func(obj interface{}) {
+			dsc.deleteDaemonset(logger, obj)
+		},
 	})
 	dsc.dsLister = daemonSetInformer.Lister()
 	dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
 
 	historyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: dsc.addHistory,
-		UpdateFunc: dsc.updateHistory,
-		DeleteFunc: dsc.deleteHistory,
+		AddFunc: func(obj interface{}) {
+			dsc.addHistory(logger, obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			dsc.updateHistory(logger, oldObj, newObj)
+		},
+		DeleteFunc: func(obj interface{}) {
+			dsc.deleteHistory(logger, obj)
+		},
 	})
 	dsc.historyLister = historyInformer.Lister()
 	dsc.historyStoreSynced = historyInformer.Informer().HasSynced
@@ -174,16 +187,26 @@ func NewDaemonSetsController(
 	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
 	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: dsc.addPod,
-		UpdateFunc: dsc.updatePod,
-		DeleteFunc: dsc.deletePod,
+		AddFunc: func(obj interface{}) {
+			dsc.addPod(logger, obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			dsc.updatePod(logger, oldObj, newObj)
+		},
+		DeleteFunc: func(obj interface{}) {
+			dsc.deletePod(logger, obj)
+		},
 	})
 	dsc.podLister = podInformer.Lister()
 	dsc.podStoreSynced = podInformer.Informer().HasSynced
 
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: dsc.addNode,
-		UpdateFunc: dsc.updateNode,
+		AddFunc: func(obj interface{}) {
+			dsc.addNode(logger, obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			dsc.updateNode(logger, oldObj, newObj)
+		},
 	},
 	)
 	dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
@@ -197,13 +220,13 @@ func NewDaemonSetsController(
 	return dsc, nil
 }
 
-func (dsc *DaemonSetsController) addDaemonset(obj interface{}) {
+func (dsc *DaemonSetsController) addDaemonset(logger klog.Logger, obj interface{}) {
 	ds := obj.(*apps.DaemonSet)
-	klog.V(4).Infof("Adding daemon set %s", ds.Name)
+	logger.V(4).Info("Adding daemon set", "daemonset", klog.KObj(ds))
 	dsc.enqueueDaemonSet(ds)
 }
 
-func (dsc *DaemonSetsController) updateDaemonset(cur, old interface{}) {
+func (dsc *DaemonSetsController) updateDaemonset(logger klog.Logger, cur, old interface{}) {
 	oldDS := old.(*apps.DaemonSet)
 	curDS := cur.(*apps.DaemonSet)
 
@@ -214,17 +237,17 @@ func (dsc *DaemonSetsController) updateDaemonset(cur, old interface{}) {
 			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", oldDS, err))
 			return
 		}
-		dsc.deleteDaemonset(cache.DeletedFinalStateUnknown{
+		dsc.deleteDaemonset(logger, cache.DeletedFinalStateUnknown{
 			Key: key,
 			Obj: oldDS,
 		})
 	}
 
-	klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
+	logger.V(4).Info("Updating daemon set", "daemonset", klog.KObj(oldDS))
 	dsc.enqueueDaemonSet(curDS)
 }
 
-func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
+func (dsc *DaemonSetsController) deleteDaemonset(logger klog.Logger, obj interface{}) {
 	ds, ok := obj.(*apps.DaemonSet)
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
@@ -238,7 +261,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 			return
 		}
 	}
-	klog.V(4).Infof("Deleting daemon set %s", ds.Name)
+	logger.V(4).Info("Deleting daemon set", "daemonset", klog.KObj(ds))
 
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
@@ -262,8 +285,9 @@ func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
 
 	defer dsc.queue.ShutDown()
 
-	klog.Infof("Starting daemon sets controller")
-	defer klog.Infof("Shutting down daemon sets controller")
+	logger := klog.FromContext(ctx)
+	logger.Info("Starting daemon sets controller")
+	defer logger.Info("Shutting down daemon sets controller")
 
 	if !cache.WaitForNamedCacheSync("daemon sets", ctx.Done(), dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
 		return
@@ -341,7 +365,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*apps.Daemon
 
 // getDaemonSetsForHistory returns a list of DaemonSets that potentially
 // match a ControllerRevision.
-func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*apps.DaemonSet {
+func (dsc *DaemonSetsController) getDaemonSetsForHistory(logger klog.Logger, history *apps.ControllerRevision) []*apps.DaemonSet {
 	daemonSets, err := dsc.dsLister.GetHistoryDaemonSets(history)
 	if err != nil || len(daemonSets) == 0 {
 		return nil
@@ -349,20 +373,20 @@ func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.Controlle
 	if len(daemonSets) > 1 {
 		// ControllerRef will ensure we don't do anything crazy, but more than one
 		// item in this list nevertheless constitutes user error.
-		klog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
-			history.Namespace, history.Name, history.Labels)
+		logger.V(4).Info("Found more than one DaemonSet selecting the ControllerRevision. This is potentially a user error",
+			"controllerRevision", klog.KObj(history), "labels", history.Labels)
 	}
 	return daemonSets
 }
 
 // addHistory enqueues the DaemonSet that manages a ControllerRevision when the ControllerRevision is created
 // or when the controller manager is restarted.
-func (dsc *DaemonSetsController) addHistory(obj interface{}) {
+func (dsc *DaemonSetsController) addHistory(logger klog.Logger, obj interface{}) {
 	history := obj.(*apps.ControllerRevision)
 	if history.DeletionTimestamp != nil {
 		// On a restart of the controller manager, it's possible for an object to
 		// show up in a state that is already pending deletion.
-		dsc.deleteHistory(history)
+		dsc.deleteHistory(logger, history)
 		return
 	}
 
@@ -372,17 +396,17 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
 		if ds == nil {
 			return
 		}
-		klog.V(4).Infof("ControllerRevision %s added.", history.Name)
+		logger.V(4).Info("Observed a ControllerRevision", "controllerRevision", klog.KObj(history))
 		return
 	}
 
 	// Otherwise, it's an orphan. Get a list of all matching DaemonSets and sync
 	// them to see if anyone wants to adopt it.
-	daemonSets := dsc.getDaemonSetsForHistory(history)
+	daemonSets := dsc.getDaemonSetsForHistory(logger, history)
 	if len(daemonSets) == 0 {
 		return
 	}
-	klog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
+	logger.V(4).Info("Orphan ControllerRevision added", "controllerRevision", klog.KObj(history))
 	for _, ds := range daemonSets {
 		dsc.enqueueDaemonSet(ds)
 	}
@@ -391,7 +415,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
 // updateHistory figures out what DaemonSet(s) manage a ControllerRevision when the ControllerRevision
 // is updated and wake them up. If anything of the ControllerRevision has changed, we need to awaken
 // both the old and new DaemonSets.
-func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
+func (dsc *DaemonSetsController) updateHistory(logger klog.Logger, old, cur interface{}) {
 	curHistory := cur.(*apps.ControllerRevision)
 	oldHistory := old.(*apps.ControllerRevision)
 	if curHistory.ResourceVersion == oldHistory.ResourceVersion {
@@ -415,7 +439,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
 		if ds == nil {
 			return
 		}
-		klog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
+		logger.V(4).Info("Observed an update to a ControllerRevision", "controllerRevision", klog.KObj(curHistory))
 		dsc.enqueueDaemonSet(ds)
 		return
 	}
@@ -424,11 +448,11 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
 	// to see if anyone wants to adopt it now.
 	labelChanged := !reflect.DeepEqual(curHistory.Labels, oldHistory.Labels)
 	if labelChanged || controllerRefChanged {
-		daemonSets := dsc.getDaemonSetsForHistory(curHistory)
+		daemonSets := dsc.getDaemonSetsForHistory(logger, curHistory)
 		if len(daemonSets) == 0 {
 			return
 		}
-		klog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
+		logger.V(4).Info("Orphan ControllerRevision updated", "controllerRevision", klog.KObj(curHistory))
 		for _, ds := range daemonSets {
 			dsc.enqueueDaemonSet(ds)
 		}
@@ -438,7 +462,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
 // deleteHistory enqueues the DaemonSet that manages a ControllerRevision when
 // the ControllerRevision is deleted. obj could be an *app.ControllerRevision, or
 // a DeletionFinalStateUnknown marker item.
-func (dsc *DaemonSetsController) deleteHistory(obj interface{}) {
+func (dsc *DaemonSetsController) deleteHistory(logger klog.Logger, obj interface{}) {
 	history, ok := obj.(*apps.ControllerRevision)
 
 	// When a delete is dropped, the relist will notice a ControllerRevision in the store not
@@ -467,17 +491,17 @@ func (dsc *DaemonSetsController) deleteHistory(obj interface{}) {
 	if ds == nil {
 		return
 	}
-	klog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
+	logger.V(4).Info("ControllerRevision deleted", "controllerRevision", klog.KObj(history))
 	dsc.enqueueDaemonSet(ds)
 }
 
-func (dsc *DaemonSetsController) addPod(obj interface{}) {
+func (dsc *DaemonSetsController) addPod(logger klog.Logger, obj interface{}) {
 	pod := obj.(*v1.Pod)
 
 	if pod.DeletionTimestamp != nil {
 		// on a restart of the controller manager, it's possible a new pod shows up in a state that
 		// is already pending deletion. Prevent the pod from being a creation observation.
-		dsc.deletePod(pod)
+		dsc.deletePod(logger, pod)
 		return
 	}
 
@@ -491,7 +515,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 		if err != nil {
 			return
 		}
-		klog.V(4).Infof("Pod %s added.", pod.Name)
+		logger.V(4).Info("Pod added", "pod", klog.KObj(pod))
 		dsc.expectations.CreationObserved(dsKey)
 		dsc.enqueueDaemonSet(ds)
 		return
@@ -505,7 +529,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 	if len(dss) == 0 {
 		return
 	}
-	klog.V(4).Infof("Orphan Pod %s added.", pod.Name)
+	logger.V(4).Info("Orphan Pod added", "pod", klog.KObj(pod))
 	for _, ds := range dss {
 		dsc.enqueueDaemonSet(ds)
 	}
@@ -514,7 +538,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 // When a pod is updated, figure out what sets manage it and wake them
 // up. If the labels of the pod have changed we need to awaken both the old
 // and new set. old and cur must be *v1.Pod types.
-func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
+func (dsc *DaemonSetsController) updatePod(logger klog.Logger, old, cur interface{}) {
 	curPod := cur.(*v1.Pod)
 	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
@@ -528,7 +552,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
 		// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
 		// for modification of the deletion timestamp and expect an ds to create more replicas asap, not wait
 		// until the kubelet actually deletes the pod.
-		dsc.deletePod(curPod)
+		dsc.deletePod(logger, curPod)
 		return
 	}
 
@@ -548,7 +572,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
 		if ds == nil {
 			return
 		}
-		klog.V(4).Infof("Pod %s updated.", curPod.Name)
+		logger.V(4).Info("Pod updated", "pod", klog.KObj(curPod))
 		dsc.enqueueDaemonSet(ds)
 		changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod)
 		// See https://github.com/kubernetes/kubernetes/pull/38076 for more details
@@ -566,7 +590,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
 	if len(dss) == 0 {
 		return
 	}
-	klog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
+	logger.V(4).Info("Orphan Pod updated", "pod", klog.KObj(curPod))
 	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
 	if labelChanged || controllerRefChanged {
 		for _, ds := range dss {
@@ -575,7 +599,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
 	}
 }
 
-func (dsc *DaemonSetsController) deletePod(obj interface{}) {
+func (dsc *DaemonSetsController) deletePod(logger klog.Logger, obj interface{}) {
 	pod, ok := obj.(*v1.Pod)
 	// When a delete is dropped, the relist will notice a pod in the store not
 	// in the list, leading to the insertion of a tombstone object which contains
@@ -608,16 +632,16 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
 	if err != nil {
 		return
 	}
-	klog.V(4).Infof("Pod %s deleted.", pod.Name)
+	logger.V(4).Info("Pod deleted", "pod", klog.KObj(pod))
 	dsc.expectations.DeletionObserved(dsKey)
 	dsc.enqueueDaemonSet(ds)
 }
 
-func (dsc *DaemonSetsController) addNode(obj interface{}) {
+func (dsc *DaemonSetsController) addNode(logger klog.Logger, obj interface{}) {
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
 	dsList, err := dsc.dsLister.List(labels.Everything())
 	if err != nil {
-		klog.V(4).Infof("Error enqueueing daemon sets: %v", err)
+		logger.V(4).Info("Error enqueueing daemon sets", "err", err)
 		return
 	}
 	node := obj.(*v1.Node)
@@ -666,7 +690,7 @@ func shouldIgnoreNodeUpdate(oldNode, curNode v1.Node) bool {
 	return apiequality.Semantic.DeepEqual(oldNode, curNode)
 }
 
-func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
+func (dsc *DaemonSetsController) updateNode(logger klog.Logger, old, cur interface{}) {
 	oldNode := old.(*v1.Node)
 	curNode := cur.(*v1.Node)
 	if shouldIgnoreNodeUpdate(*oldNode, *curNode) {
@@ -675,7 +699,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 
 	dsList, err := dsc.dsLister.List(labels.Everything())
 	if err != nil {
-		klog.V(4).Infof("Error listing daemon sets: %v", err)
+		logger.V(4).Info("Error listing daemon sets", "err", err)
 		return
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
@@ -733,11 +757,12 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *a
 	}
 	// Group Pods by Node name.
 	nodeToDaemonPods := make(map[string][]*v1.Pod)
+	logger := klog.FromContext(ctx)
 	for _, pod := range claimedPods {
 		nodeName, err := util.GetTargetNodeName(pod)
 		if err != nil {
-			klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
-				pod.Namespace, pod.Name, ds.Namespace, ds.Name)
+			logger.Info("Failed to get target node name of Pod in DaemonSet",
+				"pod", klog.KObj(pod), "daemonset", klog.KObj(ds))
 			continue
 		}
 
@@ -773,6 +798,7 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
 // - podsToDelete: the Pods need to be deleted on the node
 // - err: unexpected error
 func (dsc *DaemonSetsController) podsShouldBeOnNode(
+	logger klog.Logger,
 	node *v1.Node,
 	nodeToDaemonPods map[string][]*v1.Pod,
 	ds *apps.DaemonSet,
@@ -803,8 +829,8 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 			inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
 			if inBackoff {
 				delay := dsc.failedPodsBackoff.Get(backoffKey)
-				klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
-					pod.Namespace, pod.Name, node.Name, delay)
+				logger.V(4).Info("Deleting failed pod on node has been limited by backoff",
+					"pod", klog.KObj(pod), "node", klog.KObj(node), "currentDelay", delay)
 				dsc.enqueueDaemonSetAfter(ds, delay)
 				continue
 			}
@@ -812,7 +838,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 			dsc.failedPodsBackoff.Next(backoffKey, now)
 
 			msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
-			klog.V(2).Infof(msg)
+			logger.V(2).Info("Found failed daemon pod on node, will try to kill it", "pod", klog.KObj(pod), "node", klog.KObj(node))
 			// Emit an event so that it's discoverable to users.
 			dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
 			podsToDelete = append(podsToDelete, pod.Name)
@@ -866,10 +892,10 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 		if oldestNewPod != nil && oldestOldPod != nil {
 			switch {
 			case !podutil.IsPodReady(oldestOldPod):
-				klog.V(5).Infof("Pod %s/%s from daemonset %s is no longer ready and will be replaced with newer pod %s", oldestOldPod.Namespace, oldestOldPod.Name, ds.Name, oldestNewPod.Name)
+				logger.V(5).Info("Pod from daemonset is no longer ready and will be replaced with newer pod", "oldPod", klog.KObj(oldestOldPod), "daemonset", klog.KObj(ds), "newPod", klog.KObj(oldestNewPod))
 				podsToDelete = append(podsToDelete, oldestOldPod.Name)
 			case podutil.IsPodAvailable(oldestNewPod, ds.Spec.MinReadySeconds, metav1.Time{Time: dsc.failedPodsBackoff.Clock.Now()}):
-				klog.V(5).Infof("Pod %s/%s from daemonset %s is now ready and will replace older pod %s", oldestNewPod.Namespace, oldestNewPod.Name, ds.Name, oldestOldPod.Name)
+				logger.V(5).Info("Pod from daemonset is now ready and will replace older pod", "newPod", klog.KObj(oldestNewPod), "daemonset", klog.KObj(ds), "oldPod", klog.KObj(oldestOldPod))
 				podsToDelete = append(podsToDelete, oldestOldPod.Name)
 			}
 		}
@@ -926,10 +952,11 @@ func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet,
 
 	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
 	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
+	logger := klog.FromContext(ctx)
 	var nodesNeedingDaemonPods, podsToDelete []string
 	for _, node := range nodeList {
 		nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode := dsc.podsShouldBeOnNode(
-			node, nodeToDaemonPods, ds, hash)
+			logger, node, nodeToDaemonPods, ds, hash)
 
 		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
 		podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
@@ -951,6 +978,7 @@ func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet,
 // returns slice with errors if any
 func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
 	// We need to set expectations before creating/deleting pods to avoid race conditions.
+	logger := klog.FromContext(ctx)
 	dsKey, err := controller.KeyFunc(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
@@ -971,7 +999,7 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 	// error channel to communicate back failures. make the buffer big enough to avoid any blocking
 	errCh := make(chan error, createDiff+deleteDiff)
 
-	klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
+	logger.V(4).Info("Nodes needing daemon pods for daemon set, creating", "daemonset", klog.KObj(ds), "needCount", nodesNeedingDaemonPods, "createCount", createDiff)
 	createWait := sync.WaitGroup{}
 	// If the returned error is not nil we have a parse error.
 	// The controller handles this via the hash.
@@ -1014,7 +1042,7 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 				}
 			}
 			if err != nil {
-				klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
+				logger.V(2).Info("Failed creation, decrementing expectations for daemon set", "daemonset", klog.KObj(ds))
 				dsc.expectations.CreationObserved(dsKey)
 				errCh <- err
 				utilruntime.HandleError(err)
@@ -1025,7 +1053,7 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 		// any skipped pods that we never attempted to start shouldn't be expected.
 		skippedPods := createDiff - (batchSize + pos)
 		if errorCount < len(errCh) && skippedPods > 0 {
-			klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
+			logger.V(2).Info("Slow-start failure. Skipping creation pods, decrementing expectations for daemon set", "skippedPods", skippedPods, "daemonset", klog.KObj(ds))
 			dsc.expectations.LowerExpectations(dsKey, skippedPods, 0)
 			// The skipped pods will be retried later. The next controller resync will
 			// retry the slow start process.
@@ -1033,7 +1061,7 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 		}
 	}
 
-	klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
+	logger.V(4).Info("Pods to delete for daemon set, deleting", "daemonset", klog.KObj(ds), "toDeleteCount", podsToDelete, "deleteCount", deleteDiff)
 	deleteWait := sync.WaitGroup{}
 	deleteWait.Add(deleteDiff)
 	for i := 0; i < deleteDiff; i++ {
@@ -1042,7 +1070,7 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 			if err := dsc.podControl.DeletePod(ctx, ds.Namespace, podsToDelete[ix], ds); err != nil {
 				dsc.expectations.DeletionObserved(dsKey)
 				if !apierrors.IsNotFound(err) {
-					klog.V(2).Infof("Failed deletion, decremented expectations for set %q/%q", ds.Namespace, ds.Name)
+					logger.V(2).Info("Failed deletion, decremented expectations for daemon set", "daemonset", klog.KObj(ds))
 					errCh <- err
 					utilruntime.HandleError(err)
 				}
@@ -1116,7 +1144,8 @@ func storeDaemonSetStatus(
 }
 
 func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
-	klog.V(4).Infof("Updating daemon set status")
+	logger := klog.FromContext(ctx)
+	logger.V(4).Info("Updating daemon set status")
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -1175,10 +1204,11 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *
 }
 
 func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string) error {
+	logger := klog.FromContext(ctx)
 	startTime := dsc.failedPodsBackoff.Clock.Now()
 
 	defer func() {
-		klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, dsc.failedPodsBackoff.Clock.Now().Sub(startTime))
+		logger.V(4).Info("Finished syncing daemon set", "daemonset", key, "time", dsc.failedPodsBackoff.Clock.Now().Sub(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -1187,7 +1217,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
 	}
 	ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
 	if apierrors.IsNotFound(err) {
-		klog.V(3).Infof("daemon set has been deleted %v", key)
+		logger.V(3).Info("Daemon set has been deleted", "daemonset", key)
 		dsc.expectations.DeleteExpectations(key)
 		return nil
 	}
@@ -1244,7 +1274,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
 	case err != nil && statusErr != nil:
 		// If there was an error, and we failed to update status,
 		// log it and return the original error.
-		klog.ErrorS(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
+		logger.Error(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
 		return err
 	case err != nil:
 		return err
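The patch assumes the ctx given to NewDaemonSetsController and Run already carries a logger. A rough sketch of how a caller or a unit test might provide one is below; the package and function names are made up for the example, while the klog and ktesting calls are existing APIs.

package daemoncontext

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// newControllerContext mimics what a binary such as kube-controller-manager
// does: attach a named logger to the context before building the controller.
func newControllerContext() context.Context {
	logger := klog.LoggerWithName(klog.Background(), "daemonset-controller")
	return klog.NewContext(context.Background(), logger)
}

// In tests, ktesting yields a ctx whose logger writes through t.Log; that is
// what klog.FromContext inside the controller then returns.
func TestContextCarriesLogger(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	klog.FromContext(ctx).Info("logger retrieved from the test context")
	_ = newControllerContext() // this would be the ctx passed to NewDaemonSetsController and Run
}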