Extend resyncPeriods in controllers in production.
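The diff below replaces the hard-coded pod relist interval with a resync period supplied by the caller. As a rough caller-side sketch (not part of this commit; it assumes the new controller.ResyncPeriodFunc parameter is satisfied by any plain func() time.Duration), a production binary could hand in a jittered period so controllers don't all relist at the same moment:

// Caller-side sketch, not part of this commit. It assumes the new
// resyncPeriod parameter accepts any func() time.Duration.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResyncPeriod returns a function that yields a randomized interval
// in [minPeriod, 2*minPeriod), so controllers in a large cluster do not all
// relist their stores in lockstep.
func jitteredResyncPeriod(minPeriod time.Duration) func() time.Duration {
	return func() time.Duration {
		factor := rand.Float64() + 1
		return time.Duration(float64(minPeriod.Nanoseconds()) * factor)
	}
}

func main() {
	resyncPeriod := jitteredResyncPeriod(12 * time.Hour)
	// A caller would pass resyncPeriod (the function, not its result) to
	// NewReplicationManager; here we only print one sampled value.
	fmt.Println(resyncPeriod())
}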
@@ -43,12 +43,6 @@ const (
 	// happens based on contents in local pod storage.
 	FullControllerResyncPeriod = 30 * time.Second
 
-	// If a watch misdelivers info about a pod, it'll take at least this long
-	// to rectify the number of replicas. Note that dropped deletes are only
-	// rectified after the expectation times out because we don't know the
-	// final resting state of the pod.
-	PodRelistPeriod = 5 * time.Minute
-
 	// Realistic value of the burstReplica field for the replication manager based off
 	// performance requirements for kubernetes 1.0.
 	BurstReplicas = 500
@@ -95,7 +89,7 @@ type ReplicationManager struct {
 }
 
 // NewReplicationManager creates a new ReplicationManager.
-func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *ReplicationManager {
+func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
@@ -121,6 +115,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
 			},
 		},
 		&api.ReplicationController{},
+		// TODO: Can we have much longer period here?
 		FullControllerResyncPeriod,
 		framework.ResourceEventHandlerFuncs{
 			AddFunc: rm.enqueueController,
@@ -161,7 +156,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
 			},
 		},
 		&api.Pod{},
-		PodRelistPeriod,
+		resyncPeriod(),
 		framework.ResourceEventHandlerFuncs{
 			AddFunc: rm.addPod,
 			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
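For tests or callers that want to keep the old fixed 5-minute relist behavior, the simplest resync function just returns a constant. A hypothetical helper, not part of this diff:

// Hypothetical helper, not part of this diff: wraps a fixed duration in the
// shape the new resyncPeriod parameter expects.
package main

import (
	"fmt"
	"time"
)

func staticResyncPeriod(d time.Duration) func() time.Duration {
	return func() time.Duration { return d }
}

func main() {
	// Reproduces the removed PodRelistPeriod constant (5 minutes).
	resyncPeriod := staticResyncPeriod(5 * time.Minute)
	fmt.Println(resyncPeriod()) // 5m0s
}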