Merge pull request #13147 from mwielgus/hpa_impl

Implementation of HorizontalPodAutoscaler
Filip Grzadkowski
2015-08-26 15:36:27 +02:00
6 changed files with 352 additions and 30 deletions

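For context: the controller added below periodically lists HorizontalPodAutoscaler objects, queries Heapster for the latest per-pod consumption of the targeted resource, and scales the referenced object to 1 + floor(total consumption / per-pod target), clamped to [MinCount, MaxCount] and rate-limited by separate upscale/downscale windows. A minimal, self-contained sketch of just the replica arithmetic (the numbers are made up for illustration, not defaults from this PR):

package main

import "fmt"

func main() {
	// Illustrative numbers only: 4 pods currently average 300m CPU each
	// against a per-pod target of 500m.
	count := int64(4)
	avgMilli := int64(300)    // currentConsumption.Quantity.MilliValue()
	targetMilli := int64(500) // hpa.Spec.Target.Quantity.MilliValue()

	// Same formula as in reconcileAutoscalers: 1 + floor(total/target).
	desiredReplicas := 1 + int((avgMilli*count)/targetMilli)
	fmt.Println(desiredReplicas) // 3, later clamped to [MinCount, MaxCount]
}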

@@ -17,16 +17,21 @@ limitations under the License.
package autoscalercontroller
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util"
heapster "k8s.io/heapster/api/v1/types"
)
const (
@@ -34,16 +39,38 @@ const (
heapsterService = "monitoring-heapster"
)
var resourceToMetric = map[api.ResourceName]string{
api.ResourceCPU: "cpu-usage",
}
var heapsterQueryStart, _ = time.ParseDuration("-20m")
type HorizontalPodAutoscalerController struct {
client *client.Client
expClient client.ExperimentalInterface
}
// Aggregates results into ResourceConsumption. Also returns number of
// pods included in the aggregation.
type metricAggregator func(heapster.MetricResultList) (expapi.ResourceConsumption, int)
type metricDefinition struct {
name string
aggregator metricAggregator
}
var resourceDefinitions = map[api.ResourceName]metricDefinition{
//TODO: add memory
api.ResourceCPU: {"cpu-usage",
func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) {
sum, count := calculateSumFromLatestSample(metrics)
value := "0"
if count > 0 {
// assumes that cpu usage is in millis
value = fmt.Sprintf("%dm", sum/uint64(count))
}
return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count
}},
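// A hypothetical shape for the memory TODO above; the "memory-usage" metric
// name and the unit handling are assumptions, not part of this change:
//
//	api.ResourceMemory: {"memory-usage",
//		func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) {
//			sum, count := calculateSumFromLatestSample(metrics)
//			value := "0"
//			if count > 0 {
//				value = fmt.Sprintf("%d", sum/uint64(count))
//			}
//			return expapi.ResourceConsumption{Resource: api.ResourceMemory, Quantity: resource.MustParse(value)}, count
//		}},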
}
var heapsterQueryStart, _ = time.ParseDuration("-5m")
var downscaleForbiddenWindow, _ = time.ParseDuration("20m")
var upscaleForbiddenWindow, _ = time.ParseDuration("3m")
func New(client *client.Client, expClient client.ExperimentalInterface) *HorizontalPodAutoscalerController {
//TODO: switch to client.Interface
return &HorizontalPodAutoscalerController{
@@ -86,16 +113,18 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
podNames = append(podNames, pod.Name)
}
metric, metricDefined := resourceToMetric[hpa.Spec.Target.Resource]
metricSpec, metricDefined := resourceDefinitions[hpa.Spec.Target.Resource]
if !metricDefined {
glog.Warningf("Heapster metric not defined for %s %v", reference, hpa.Spec.Target.Resource)
continue
}
startTime := time.Now().Add(heapsterQueryStart)
now := time.Now()
startTime := now.Add(heapsterQueryStart)
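// Query Heapster's model API for the chosen metric across all pods of the
// scale target, e.g. /api/v1/model/namespaces/<ns>/pod-list/<pod1>,<pod2>/metrics/cpu-usage.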
metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
hpa.Spec.ScaleRef.Namespace,
strings.Join(podNames, ","),
metric)
metricSpec.name)
resultRaw, err := a.client.
Get().
@@ -113,7 +142,90 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
continue
}
var metrics heapster.MetricResultList
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
glog.Warningf("Failed to unmarshall heapster response: %v", err)
continue
}
glog.Infof("Metrics available for %s: %s", reference, string(resultRaw))
currentConsumption, count := metricSpec.aggregator(metrics)
if count != len(podList.Items) {
glog.Warningf("Metrics obtained for %d/%d of pods", count, len(podList.Items))
continue
}
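// currentConsumption.Quantity is the per-pod average, so multiplying by the
// pod count gives total consumption; dividing by the per-pod target yields
// the scaling ratio.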
// if the ratio is 1.2 we want to have 2 replicas
desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(count))/hpa.Spec.Target.Quantity.MilliValue())
if desiredReplicas < hpa.Spec.MinCount {
desiredReplicas = hpa.Spec.MinCount
}
if desiredReplicas > hpa.Spec.MaxCount {
desiredReplicas = hpa.Spec.MaxCount
}
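// Rate-limit scaling to avoid thrashing: a downscale is allowed only if the
// last scale happened more than downscaleForbiddenWindow (20m) ago, an
// upscale only after upscaleForbiddenWindow (3m).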
rescale := false
if desiredReplicas != count {
// Going down
if desiredReplicas < count && (hpa.Status.LastScaleTimestamp == nil ||
hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
rescale = true
}
// Going up
if desiredReplicas > count && (hpa.Status.LastScaleTimestamp == nil ||
hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
rescale = true
}
if rescale {
scale.Spec.Replicas = desiredReplicas
_, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
if err != nil {
glog.Warningf("Failed to rescale %s: %v", reference, err)
continue
}
}
}
hpa.Status = expapi.HorizontalPodAutoscalerStatus{
CurrentReplicas: count,
DesiredReplicas: desiredReplicas,
CurrentConsumption: currentConsumption,
}
if rescale {
now := util.NewTime(now)
hpa.Status.LastScaleTimestamp = &now
}
_, err = a.expClient.HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
if err != nil {
glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err)
continue
}
}
return nil
}
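// calculateSumFromLatestSample sums, across all pods in the Heapster result,
// the value of each pod's most recent sample, and returns that sum together
// with the number of pods that contributed a sample.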
func calculateSumFromLatestSample(metrics heapster.MetricResultList) (uint64, int) {
sum := uint64(0)
count := 0
for _, podMetrics := range metrics.Items {
// newest tracks the most recent sample seen for this pod.
var newest *heapster.MetricPoint
for i, metricPoint := range podMetrics.Metrics {
if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
// Take the address of the slice element, not of the loop variable,
// which is reused (and overwritten) on every iteration.
newest = &podMetrics.Metrics[i]
}
}
if newest != nil {
sum += newest.Value
count++
}
}
return sum, count
}
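Nothing in this diff shows how reconcileAutoscalers is driven. A minimal in-package sketch of a periodic loop, assuming a hypothetical run helper and an arbitrary interval (both illustrative, not part of this PR):

// Hypothetical driver, not part of this change: call reconcileAutoscalers
// on a fixed interval until stop is closed.
func (a *HorizontalPodAutoscalerController) run(interval time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := a.reconcileAutoscalers(); err != nil {
				glog.Warningf("Failed to reconcile horizontal pod autoscalers: %v", err)
			}
		case <-stop:
			return
		}
	}
}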