Merge pull request #38320 from liggitt/golang-ratelimit

Automatic merge from submit-queue (batch tested with PRs 59158, 38320, 59059, 55516, 59357). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Switch from juju/ratelimit to golang.org/x/time/rate

Replaces juju/ratelimit with golang.org/x/time/rate
xref https://github.com/kubernetes/steering/issues/21

This requires removing the Saturation() method from the rate limiter. While attempting to contribute a Saturation() equivalent to the `golang.org/x/time/rate` implementation, it became clear that the value it calculated was not very useful when combined with periodic polling. See the discussion in https://go-review.googlesource.com/c/time/+/29958#message-4caffc11669cadd90e2da4c05122cfec50ea6a22
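
As background for the mechanical changes below, here is a minimal sketch (not code from this PR) of how a juju/ratelimit-style bucket maps onto `golang.org/x/time/rate`; the 10 qps / 100 burst values mirror the certificate controller change further down:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Equivalent shape to juju's NewBucketWithRate(float64(10), int64(100)):
	// tokens refill at 10/sec, the bucket holds at most 100.
	limiter := rate.NewLimiter(rate.Limit(10), 100)

	// Non-blocking attempt, analogous to flowcontrol's TryAccept().
	if limiter.Allow() {
		fmt.Println("got a token without waiting")
	}

	// Blocking acquisition, analogous to Accept(); cancellable via context.
	if err := limiter.Wait(context.Background()); err != nil {
		fmt.Println("wait aborted:", err)
	}
}
```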

```release-note
NONE
```
Author: Kubernetes Submit Queue
Date: 2018-02-05 12:40:34 -08:00
Committer: GitHub

24 changed files with 81 additions and 960 deletions


```diff
@@ -15,7 +15,7 @@ go_library(
     deps = [
         "//pkg/controller:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/github.com/juju/ratelimit:go_default_library",
+        "//vendor/golang.org/x/time/rate:go_default_library",
         "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
```


```diff
@@ -22,6 +22,9 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/golang/glog"
+	"golang.org/x/time/rate"
+
 	certificates "k8s.io/api/certificates/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -34,9 +37,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
-
-	"github.com/golang/glog"
-	"github.com/juju/ratelimit"
 )
 
 type CertificateController struct {
@@ -65,7 +65,7 @@ func NewCertificateController(
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
 			workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second),
 			// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
-			&workqueue.BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
+			&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
 		), "certificate"),
 		handler: handler,
 	}
```
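
Client-go's `BucketRateLimiter` now embeds the `*rate.Limiter` set above, and its `When()` is essentially `Reserve().Delay()`. A standalone sketch, assuming nothing beyond `x/time/rate` itself, of how that delay grows once the burst is spent:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Deliberately tiny burst so the delay becomes visible immediately.
	limiter := rate.NewLimiter(rate.Limit(10), 3)

	for i := 0; i < 5; i++ {
		// Reserve commits to a future token; Delay reports how long the
		// caller must wait before acting on it.
		r := limiter.Reserve()
		fmt.Printf("reservation %d: delay %v\n", i, r.Delay())
	}
}
```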


```diff
@@ -293,20 +293,15 @@ func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
 		newLimiter = flowcontrol.NewFakeNeverRateLimiter()
 	} else {
 		newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, EvictionRateLimiterBurst)
-	}
-	// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
-	// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
-	// - saturation (percentage of used tokens)
-	// - number of used tokens
-	// - number of available tokens
-	// - something else
-	for q.limiter.Saturation() > newLimiter.Saturation() {
-		// Check if we're not using fake limiter
-		previousSaturation := newLimiter.Saturation()
-		newLimiter.TryAccept()
-		// It's a fake limiter
-		if newLimiter.Saturation() == previousSaturation {
-			break
-		}
-	}
+
+		// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
+		// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
+		// - saturation (percentage of used tokens)
+		// - number of used tokens
+		// - number of available tokens
+		// - something else
+		if q.limiter.TryAccept() == false {
+			newLimiter.TryAccept()
+		}
+	}
 	q.limiter.Stop()
```
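
The replacement drain logic reads: if the outgoing limiter has no token available right now, consume one from the incoming limiter, so swapping limiters never hands a waiting caller an instant extra token. A self-contained sketch of that behavior; `swapLimiter` is an illustrative helper name, not the real method:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

// swapLimiter (hypothetical helper) builds a new token-bucket limiter and,
// if the old one is currently exhausted, pre-drains one token from the new
// one, mirroring the `if q.limiter.TryAccept() == false` branch above.
func swapLimiter(old flowcontrol.RateLimiter, newQPS float32, burst int) flowcontrol.RateLimiter {
	newLimiter := flowcontrol.NewTokenBucketRateLimiter(newQPS, burst)
	if !old.TryAccept() {
		newLimiter.TryAccept()
	}
	old.Stop()
	return newLimiter
}

func main() {
	old := flowcontrol.NewTokenBucketRateLimiter(1, 1)
	old.TryAccept() // spend the old limiter's only burst token

	// The old limiter is saturated, so the swap drains the new bucket too.
	limiter := swapLimiter(old, 1, 1)
	fmt.Println("token right after swap:", limiter.TryAccept()) // false
}
```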