Merge pull request #25279 from piosz/hpa-ga

Automatic merge from submit-queue

Move internal types of hpa from pkg/apis/extensions to pkg/apis/autoscaling

ref #21577

@lavalamp could you please review or delegate to someone from the API machinery team?
@janetkuo could you please take a look at the kubectl changes?

cc @fgrzadkowski @jszczepkowski @mwielgus @kubernetes/autoscaling
Committed by k8s-merge-robot on 2016-05-11 13:19:42 -07:00
52 changed files with 3063 additions and 3662 deletions
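
For orientation, the sketch below (not part of the commit; names and values are illustrative) shows the same HPA written with the old extensions internal types and with the new autoscaling types, using only field names that appear in the diffs that follow.

// Sketch only: the old extensions-group HPA next to the new autoscaling-group HPA.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	target := int32(80)
	minReplicas := int32(1)

	// Before: the HPA lived in the extensions group; the CPU target was a
	// nested struct and the scale target carried an explicit Subresource.
	old := extensions.HorizontalPodAutoscaler{
		ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: "default"},
		Spec: extensions.HorizontalPodAutoscalerSpec{
			ScaleRef: extensions.SubresourceReference{
				Kind:        "ReplicationController",
				Name:        "frontend",
				APIVersion:  "v1",
				Subresource: "scale",
			},
			MaxReplicas:    10,
			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: target},
		},
	}

	// After: the HPA lives in the autoscaling group; the CPU target is a flat
	// *int32 and the scale target is a CrossVersionObjectReference.
	updated := autoscaling.HorizontalPodAutoscaler{
		ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: "default"},
		Spec: autoscaling.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscaling.CrossVersionObjectReference{
				Kind:       "ReplicationController",
				Name:       "frontend",
				APIVersion: "v1",
			},
			MinReplicas:                    &minReplicas,
			MaxReplicas:                    10,
			TargetCPUUtilizationPercentage: &target,
		},
	}

	fmt.Println(old.Name, updated.Name)
}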

pkg/controller/podautoscaler/horizontal.go

@@ -26,8 +26,10 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record"
@@ -51,7 +53,7 @@ const (
type HorizontalController struct {
scaleNamespacer unversionedextensions.ScalesGetter
hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter
hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter
metricsClient metrics.MetricsClient
eventRecorder record.EventRecorder
@@ -75,12 +77,12 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options)
},
},
&extensions.HorizontalPodAutoscaler{},
&autoscaling.HorizontalPodAutoscaler{},
resyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
hpa := obj.(*extensions.HorizontalPodAutoscaler)
hasCPUPolicy := hpa.Spec.CPUUtilization != nil
hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
_, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if !hasCPUPolicy && !hasCustomMetricsPolicy {
controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
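
Note how the AddFunc check changes shape with the move: the extensions spec expressed the CPU target as a nested struct, while the autoscaling spec exposes a plain pointer. A minimal sketch, with illustrative helper names that are not part of the commit:

package example

import (
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

// hasCPUPolicyOld reports whether an extensions-group HPA sets a CPU target
// (old shape: a nested *CPUTargetUtilization with a TargetPercentage field).
func hasCPUPolicyOld(hpa *extensions.HorizontalPodAutoscaler) bool {
	return hpa.Spec.CPUUtilization != nil
}

// hasCPUPolicyNew reports whether an autoscaling-group HPA sets a CPU target
// (new shape: a flat *int32 percentage on the spec).
func hasCPUPolicyNew(hpa *autoscaling.HorizontalPodAutoscaler) bool {
	return hpa.Spec.TargetCPUUtilizationPercentage != nil
}
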
@@ -91,7 +93,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
}
},
UpdateFunc: func(old, cur interface{}) {
hpa := cur.(*extensions.HorizontalPodAutoscaler)
hpa := cur.(*autoscaling.HorizontalPodAutoscaler)
err := controller.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
@@ -102,7 +104,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
)
}
func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
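
With the new signature, callers keep handing in the extensions client for the scale subresource but now pass the autoscaling client for HPAs. An illustrative wiring sketch, assuming the clientset and metrics client are built elsewhere:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/podautoscaler"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

// startHPAController shows one way a caller can construct the controller after
// this change; the function itself and the resync period are illustrative.
func startHPAController(clientset internalclientset.Interface, metricsClient metrics.MetricsClient, stopCh <-chan struct{}) {
	controller := podautoscaler.NewHorizontalController(
		clientset.Core(),        // event recording
		clientset.Extensions(),  // the scale subresource still lives under extensions
		clientset.Autoscaling(), // HPA objects now come from the autoscaling group
		metricsClient,
		30*time.Second,
	)
	go controller.Run(stopCh)
}
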
@@ -128,10 +130,10 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) {
glog.Infof("Shutting down HPA Controller")
}
func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) {
func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) {
targetUtilization := int32(defaultTargetCPUUtilizationPercentage)
if hpa.Spec.CPUUtilization != nil {
targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
if hpa.Spec.TargetCPUUtilizationPercentage != nil {
targetUtilization = *hpa.Spec.TargetCPUUtilizationPercentage
}
currentReplicas := scale.Status.Replicas
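
The defaulted target feeds the usual ratio-based resize. A rough sketch of that computation follows; the 10% tolerance and the ceil rounding are stated as assumptions about the surrounding controller code, which this hunk does not show:

package example

import "math"

// desiredReplicas sketches the ratio-based rule: scale the current replica
// count by observed/target CPU utilization, rounding up, and do nothing if the
// ratio is within an assumed 10% tolerance of the target.
func desiredReplicas(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) <= 0.1 {
		return currentReplicas // within tolerance: leave the scale alone
	}
	return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}

// Worked example: 3 replicas at 90% observed CPU against an 80% target gives
// ceil(3 * 90/80) = ceil(3.375) = 4 desired replicas.
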
@@ -170,7 +172,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.
// Returns number of replicas, metric which required highest number of replicas,
// status string (also json-serialized extensions.CustomMetricsCurrentStatusList),
// last timestamp of the metrics involved in computations or error, if occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensions.Scale,
cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) {
currentReplicas := scale.Status.Replicas
@@ -246,10 +248,10 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H
return replicas, metric, string(byteStatusList), timestamp, nil
}
func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPodAutoscaler) error {
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)
func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler) error {
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
@@ -282,7 +284,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod
// All basic scenarios covered, the state should be sane, lets use metrics.
cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if hpa.Spec.CPUUtilization != nil || !cmAnnotationFound {
if hpa.Spec.TargetCPUUtilizationPercentage != nil || !cmAnnotationFound {
cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
if err != nil {
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
@@ -334,7 +336,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod
rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
if rescale {
scale.Spec.Replicas = desiredReplicas
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err)
@@ -349,7 +351,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod
return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}
func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
func shouldScale(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
if desiredReplicas != currentReplicas {
// Going down only if the usageRatio dropped significantly below the target
// and there was no rescaling in the last downscaleForbiddenWindow.
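
The window comparison referred to above works off the HPA's last scale time. A simplified sketch; the window durations and the use of Status.LastScaleTime are assumptions about the surrounding code rather than lines from this diff:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

// outsideForbiddenWindow sketches the cool-down test applied before rescaling.
// The 3 and 5 minute windows are assumed values for illustration; the real
// constants live near the top of the controller file.
func outsideForbiddenWindow(hpa *autoscaling.HorizontalPodAutoscaler, scalingDown bool, now time.Time) bool {
	if hpa.Status.LastScaleTime == nil {
		return true // never scaled before, nothing to wait for
	}
	window := 3 * time.Minute // assumed upscale window
	if scalingDown {
		window = 5 * time.Minute // assumed downscale window
	}
	return hpa.Status.LastScaleTime.Add(window).Before(now)
}
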
@@ -370,15 +372,15 @@ func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desir
return false
}
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int32) {
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas int32) {
err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
if err != nil {
glog.Errorf("%v", err)
}
}
func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error {
hpa.Status = extensions.HorizontalPodAutoscalerStatus{
func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error {
hpa.Status = autoscaling.HorizontalPodAutoscalerStatus{
CurrentReplicas: currentReplicas,
DesiredReplicas: desiredReplicas,
CurrentCPUUtilizationPercentage: cpuCurrentUtilization,

pkg/controller/podautoscaler/horizontal_test.go

@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
@@ -138,25 +139,24 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock()
defer tc.Unlock()
obj := &extensions.HorizontalPodAutoscalerList{
Items: []extensions.HorizontalPodAutoscaler{
obj := &autoscaling.HorizontalPodAutoscalerList{
Items: []autoscaling.HorizontalPodAutoscaler{
{
ObjectMeta: api.ObjectMeta{
Name: hpaName,
Namespace: namespace,
SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
},
Spec: extensions.HorizontalPodAutoscalerSpec{
ScaleRef: extensions.SubresourceReference{
Kind: tc.resource.kind,
Name: tc.resource.name,
APIVersion: tc.resource.apiVersion,
Subresource: "scale",
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: tc.resource.kind,
Name: tc.resource.name,
APIVersion: tc.resource.apiVersion,
},
MinReplicas: &tc.minReplicas,
MaxReplicas: tc.maxReplicas,
},
Status: extensions.HorizontalPodAutoscalerStatus{
Status: autoscaling.HorizontalPodAutoscalerStatus{
CurrentReplicas: tc.initialReplicas,
DesiredReplicas: tc.initialReplicas,
},
@@ -165,7 +165,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
}
if tc.CPUTarget > 0.0 {
obj.Items[0].Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: tc.CPUTarget}
obj.Items[0].Spec.TargetCPUUtilizationPercentage = &tc.CPUTarget
}
if tc.cmTarget != nil {
b, err := json.Marshal(tc.cmTarget)
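
Since the CPU target is now a *int32, the test takes the address of an addressable value such as tc.CPUTarget; a literal like &80 would not compile. A common helper idiom for building such pointers (illustrative, not part of this commit):

package example

// int32Ptr returns a pointer to a copy of v, handy for pointer-valued spec
// fields like TargetCPUUtilizationPercentage and MinReplicas.
func int32Ptr(v int32) *int32 { return &v }

// Usage sketch:
//   spec.TargetCPUUtilizationPercentage = int32Ptr(80)
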
@@ -327,7 +327,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock()
defer tc.Unlock()
obj := action.(core.UpdateAction).GetObject().(*extensions.HorizontalPodAutoscaler)
obj := action.(core.UpdateAction).GetObject().(*autoscaling.HorizontalPodAutoscaler)
assert.Equal(t, namespace, obj.Namespace)
assert.Equal(t, hpaName, obj.Name)
assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas)
@@ -383,7 +383,7 @@ func (tc *testCase) runTest(t *testing.T) {
metricsClient: metricsClient,
eventRecorder: recorder,
scaleNamespacer: testClient.Extensions(),
hpaNamespacer: testClient.Extensions(),
hpaNamespacer: testClient.Autoscaling(),
}
store, frameworkController := newInformer(hpaController, time.Minute)
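
For completeness, a sketch of how the fake clientset can be primed to serve the autoscaling-group HPA list that prepareTestClient builds above; the reactor wiring mirrors the test's pattern but is illustrative rather than quoted from it:

package example

import (
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

// fakeClientWithHPA returns a fake clientset that answers HPA list requests
// with the given autoscaling-group object, so the informer and controller see
// the new internal types in tests.
func fakeClientWithHPA(list *autoscaling.HorizontalPodAutoscalerList) *fake.Clientset {
	client := &fake.Clientset{}
	client.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (bool, runtime.Object, error) {
		return true, list, nil
	})
	return client
}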