Merge pull request #112127 from gjkim42/update-status-despite-error

Update daemonSet status even if syncDaemonSet fails
Authored by: Kubernetes Prow Robot
Date: 2022-11-07 16:00:28 -08:00
Committed by: GitHub
4 changed files with 267 additions and 24 deletions
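The substance of the change: previously, syncDaemonSet returned as soon as manage, rollingUpdate, or cleanupHistory failed, so the DaemonSet's status was only stored on a fully successful pass. After this PR, the sync work is factored into a new updateDaemonSet helper, the status update always runs, and the two possible errors are reconciled. A minimal, self-contained sketch of that pattern (runSync and updateStatus are hypothetical stand-ins, not the controller's real API):

```go
package main

import (
	"errors"
	"fmt"
)

// reconcile runs the sync, then updates status even if the sync
// failed, and decides which error to return.
func reconcile(runSync, updateStatus func() error) error {
	err := runSync()
	statusErr := updateStatus() // always attempted, even after a sync failure
	switch {
	case err != nil && statusErr != nil:
		// Both failed: report the status failure, but return the
		// original sync error so retries target the real problem.
		fmt.Println("failed to update status:", statusErr)
		return err
	case err != nil:
		return err
	default:
		return statusErr // nil when both succeeded
	}
}

func main() {
	err := reconcile(
		func() error { return errors.New("sync failed") },
		func() error { return nil }, // the status update still ran
	)
	fmt.Println(err)
}
```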


@@ -887,6 +887,32 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 	return nodesNeedingDaemonPods, podsToDelete
 }
 
+func (dsc *DaemonSetsController) updateDaemonSet(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash, key string, old []*apps.ControllerRevision) error {
+	err := dsc.manage(ctx, ds, nodeList, hash)
+	if err != nil {
+		return err
+	}
+
+	// Process rolling updates if we're ready.
+	if dsc.expectations.SatisfiedExpectations(key) {
+		switch ds.Spec.UpdateStrategy.Type {
+		case apps.OnDeleteDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
+			err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	err = dsc.cleanupHistory(ctx, ds, old)
+	if err != nil {
+		return fmt.Errorf("failed to clean up revisions of DaemonSet: %w", err)
+	}
+
+	return nil
+}
+
 // manage manages the scheduling and running of Pods of ds on nodes.
 // After figuring out which nodes should run a Pod of ds but not yet running one and
 // which nodes should not run a Pod of ds but currently running one, it calls function
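A detail in the new updateDaemonSet that is easy to misread: the OnDeleteDaemonSetStrategyType case has an empty body. Go switch cases do not fall through, so an empty case is a deliberate no-op: under the OnDelete strategy, no rolling update runs and err stays nil. A toy illustration of the same shape (local types, not the real apps constants):

```go
package main

import "fmt"

type updateStrategy string

const (
	onDelete      updateStrategy = "OnDelete"
	rollingUpdate updateStrategy = "RollingUpdate"
)

func main() {
	for _, s := range []updateStrategy{onDelete, rollingUpdate} {
		var err error
		switch s {
		case onDelete:
			// Intentionally empty: Go has no implicit fallthrough,
			// so this case matches and does nothing.
		case rollingUpdate:
			err = fmt.Errorf("rolling update attempted")
		}
		fmt.Printf("%s -> err=%v\n", s, err)
	}
}
```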
@@ -1136,7 +1162,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *
 	err = storeDaemonSetStatus(ctx, dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
 	if err != nil {
-		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
+		return fmt.Errorf("error storing status for daemon set %#v: %w", ds, err)
 	}
 
 	// Resync the DaemonSet after MinReadySeconds as a last line of defense to guard against clock-skew.
@@ -1210,29 +1236,21 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
 		return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, false)
 	}
 
-	err = dsc.manage(ctx, ds, nodeList, hash)
-	if err != nil {
+	err = dsc.updateDaemonSet(ctx, ds, nodeList, hash, dsKey, old)
+	statusErr := dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
+	switch {
+	case err != nil && statusErr != nil:
+		// If there was an error, and we failed to update status,
+		// log it and return the original error.
+		klog.ErrorS(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
 		return err
+	case err != nil:
+		return err
+	case statusErr != nil:
+		return statusErr
 	}
-
-	// Process rolling updates if we're ready.
-	if dsc.expectations.SatisfiedExpectations(dsKey) {
-		switch ds.Spec.UpdateStrategy.Type {
-		case apps.OnDeleteDaemonSetStrategyType:
-		case apps.RollingUpdateDaemonSetStrategyType:
-			err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	err = dsc.cleanupHistory(ctx, ds, old)
-	if err != nil {
-		return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
-	}
-
-	return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
+
+	return nil
 }
 
 // NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
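In the both-failed branch above, the status error is logged with klog's structured API rather than discarded, and the original sync error is returned so requeue/backoff is driven by the primary failure. A minimal sketch of that logging call (assuming k8s.io/klog/v2; the string value stands in for what klog.KObj(ds) would render):

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	statusErr := errors.New(`Operation cannot be fulfilled on daemonsets.apps "fluentd": the object has been modified`)
	// ErrorS emits the error plus structured key/value pairs; in the
	// controller the value is klog.KObj(ds), i.e. the namespace/name.
	klog.ErrorS(statusErr, "Failed to update status", "daemonSet", "kube-system/fluentd")
	klog.Flush()
}
```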