Add backoff for DS's pod deletion to limit fighting with kubelet failing the pod repeatedly

Tomas Nozicka
2018-08-15 16:03:39 +02:00
parent cfb4a5e95a
commit 63656da296
4 changed files with 131 additions and 10 deletions
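
Only the controller-manager wiring is shown on this page; the deletion-side check itself lands in pkg/controller/daemon as part of the 131 added lines. A minimal sketch of that gating pattern, assuming a hypothetical helper name and key format (not the commit's exact code):

package daemon

import "k8s.io/client-go/util/flowcontrol"

// shouldDeleteFailedPod sketches the gate this commit introduces; the
// function name and the dsKey+"/"+nodeName key format are assumptions.
func shouldDeleteFailedPod(backoff *flowcontrol.Backoff, dsKey, nodeName string) bool {
	key := dsKey + "/" + nodeName
	now := backoff.Clock.Now()
	if backoff.IsInBackOffSinceUpdate(key, now) {
		// A failed pod for this DaemonSet/node pair was deleted
		// recently; skip this round instead of fighting a kubelet
		// that keeps failing the replacement.
		return false
	}
	// Record the deletion; the per-key delay doubles on each call,
	// from 1s up to the 15m cap wired in below.
	backoff.Next(key, now)
	return true
}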

cmd/kube-controller-manager/app/apps.go

@@ -22,10 +22,11 @@ package app
 import (
 	"fmt"
 	"net/http"
+	"time"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/deployment"
 	"k8s.io/kubernetes/pkg/controller/replicaset"
@@ -42,6 +43,7 @@ func startDaemonSetController(ctx ControllerContext) (http.Handler, bool, error)
 		ctx.InformerFactory.Core().V1().Pods(),
 		ctx.InformerFactory.Core().V1().Nodes(),
 		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
+		flowcontrol.NewBackOff(1*time.Second, 15*time.Minute),
 	)
 	if err != nil {
 		return nil, true, fmt.Errorf("error creating DaemonSets controller: %v", err)
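
The constructor change above only plumbs the Backoff in; the behavior comes from client-go's flowcontrol.Backoff, which tracks one exponentially growing delay per string key. A minimal runnable sketch of those semantics (the short 10ms/80ms durations and the key are illustrative; the controller manager wires 1s/15m as shown above):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Short durations so the demo finishes quickly; the controller
	// manager uses flowcontrol.NewBackOff(1*time.Second, 15*time.Minute).
	backoff := flowcontrol.NewBackOff(10*time.Millisecond, 80*time.Millisecond)
	key := "default/my-ds/node-1" // one entry per key; this format is illustrative

	for pass := 1; pass <= 6; pass++ {
		now := backoff.Clock.Now()
		if backoff.IsInBackOffSinceUpdate(key, now) {
			// Still inside the current window: skip the action. A real
			// controller would requeue instead of sleeping.
			fmt.Printf("pass %d: in backoff (window %v)\n", pass, backoff.Get(key))
			time.Sleep(backoff.Get(key))
			continue
		}
		// Outside the window: act, then record it. Next doubles the
		// per-key delay, up to the configured cap.
		backoff.Next(key, now)
		fmt.Printf("pass %d: action allowed, next window %v\n", pass, backoff.Get(key))
	}
}

Each allowed pass doubles the window (10ms, 20ms, 40ms, ...), which is exactly what keeps the DaemonSet controller from tight-looping on deleting a failed pod that kubelet immediately recreates and fails again.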