Merge pull request #129519 from kishen-v/automated-cherry-pick-of-#127422-upstream-release-1.31
Automated cherry pick of #127422: Fix Go vet errors for master golang
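Every hunk below is the same class of fix: the go vet printf check (tightened in newer Go releases) rejects a non-constant string used as the format argument of t.Errorf, t.Fatalf, fmt.Errorf, and other printf-style helpers, because a stray % verb inside a dynamic message would be misinterpreted. The change switches each call to the non-formatting variant (t.Error, t.Fatal, errors.New) or passes an explicit constant "%s" format. A minimal, self-contained sketch of the flagged patterns and the replacements used here; the helper name logf is illustrative and not taken from the Kubernetes code:

package main

import (
	"errors"
	"fmt"
)

// logf stands in for printf-style helpers such as t.Errorf or fmt.Errorf.
func logf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	msg := "selector matched 0% of pods" // dynamic message containing a '%'

	// Flagged by go vet (printf check): non-constant format string.
	//   logf(msg)
	//   err := fmt.Errorf(msg)

	// Replacements applied throughout this change:
	logf("%s", msg)        // keep the formatting helper, pass the message as an argument
	err := errors.New(msg) // or drop printf semantics entirely
	fmt.Println(err)
}

t.Error and t.Fatal take plain arguments, so they are the drop-in choice wherever no formatting verbs are actually needed.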
@@ -2782,7 +2782,7 @@ func waitForChanReceive(t *testing.T, timeout time.Duration, receivingChan chan
 	timer := time.NewTimer(timeout)
 	select {
 	case <-timer.C:
-		t.Errorf(errorMsg)
+		t.Error(errorMsg)
 	case <-receivingChan:
 	}
 }

@@ -2450,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node2.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2479,7 +2479,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node3.Status = unhealthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2492,7 +2492,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node3.Status.Conditions = overrideNodeNewStatusConditions
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2638,7 +2638,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 	node0.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 
@@ -2870,12 +2870,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 	node1.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 
@@ -338,7 +338,7 @@ func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hp
 	// return an error and set the condition of the hpa based on the first invalid metric.
 	// Otherwise set the condition as scaling active as we're going to scale
 	if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
-		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
+		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, "%s", invalidMetricCondition.Message)
 		return -1, "", statuses, time.Time{}, invalidMetricError
 	}
 	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
@@ -385,15 +385,15 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
 		errMsg := "selector is required"
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
 		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
-		return nil, fmt.Errorf(errMsg)
+		return nil, errors.New(errMsg)
 	}
 
 	parsedSelector, err := labels.Parse(selector)
 	if err != nil {
 		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
-		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "%s", errMsg)
+		return nil, errors.New(errMsg)
 	}
 
 	hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -413,8 +413,8 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
 	if len(selectingHpas) > 1 {
 		errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
-		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", "%s", errMsg)
+		return nil, errors.New(errMsg)
 	}
 
 	return parsedSelector, nil
@@ -570,7 +570,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
 	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-	err = fmt.Errorf(errMsg)
+	err = errors.New(errMsg)
 	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
 	return 0, time.Time{}, "", condition, err
 }
@@ -617,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context
 
 	if target.AverageUtilization == nil {
 		errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
-		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+		return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
 	}
 
 	targetUtilization := *target.AverageUtilization
@@ -719,9 +719,9 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
 	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
-	err = fmt.Errorf(errMsg)
+	err = errors.New(errMsg)
 	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
-	return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+	return 0, time.Time{}, "", condition, errors.New(errMsg)
 }
 
 func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
@@ -950,12 +950,12 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
 	}
 
-	desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
+	desiredReplicas, reason, message := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
 
 	if desiredReplicas == stabilizedRecommendation {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
 	} else {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
 	}
 
 	return desiredReplicas
@@ -991,15 +991,15 @@ func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autosc
 	normalizationArg.DesiredReplicas = stabilizedRecommendation
 	if stabilizedRecommendation != prenormalizedDesiredReplicas {
 		// "ScaleUpStabilized" || "ScaleDownStabilized"
-		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
+		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, "%s", message)
 	} else {
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
 	}
 	desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
 	if desiredReplicas == stabilizedRecommendation {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
 	} else {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
 	}
 
 	return desiredReplicas

@@ -82,7 +82,7 @@ func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCon
 	resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
 	copy(resv2, statusOk)
 	for _, override := range overrides {
-		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
+		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, "%s", override.Message)
 	}
 
 	// copy to a v1 slice

@@ -824,35 +824,35 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 		logger, ctx := ktesting.NewTestContext(t)
 		ssc, spc, om, _ := newFakeStatefulSetController(ctx, set)
 		if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to turn up StatefulSet : %s", err))
+			t.Error(onPolicy("Failed to turn up StatefulSet : %s", err))
 		}
 		var err error
 		if set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name); err != nil {
-			t.Errorf(onPolicy("Could not get scaled up set: %v", err))
+			t.Error(onPolicy("Could not get scaled up set: %v", err))
 		}
 		if set.Status.Replicas != 3 {
-			t.Errorf(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
+			t.Error(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
 		}
 		*set.Spec.Replicas = 2
 		if err := scaleDownStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
+			t.Error(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 2 {
-			t.Errorf(onPolicy("Failed to scale statefulset to 2 replicas"))
+			t.Error(onPolicy("Failed to scale statefulset to 2 replicas"))
 		}
 
 		var claim *v1.PersistentVolumeClaim
 		claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
 		if err != nil {
-			t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+			t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
 		}
 		refs := claim.GetOwnerReferences()
 		if len(refs) != 1 {
-			t.Errorf(onPolicy("Expected only one refs: %v", refs))
+			t.Error(onPolicy("Expected only one refs: %v", refs))
 		}
 		// Make the pod ref stale.
 		for i := range refs {
@@ -863,29 +863,29 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 		}
 		claim.SetOwnerReferences(refs)
 		if err = om.claimsIndexer.Update(claim); err != nil {
-			t.Errorf(onPolicy("Could not update claim with new owner ref: %v", err))
+			t.Error(onPolicy("Could not update claim with new owner ref: %v", err))
 		}
 
 		*set.Spec.Replicas = 3
 		// Until the stale PVC goes away, the scale up should never finish. Run 10 iterations, then delete the PVC.
 		if err := scaleUpStatefulSetControllerBounded(logger, set, ssc, spc, om, 10); err != nil {
-			t.Errorf(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
+			t.Error(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 2 {
-			t.Errorf(onPolicy("Expected set to stay at two replicas"))
+			t.Error(onPolicy("Expected set to stay at two replicas"))
 		}
 
 		claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
 		if err != nil {
-			t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+			t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
 		}
 		refs = claim.GetOwnerReferences()
 		if len(refs) != 1 {
-			t.Errorf(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
+			t.Error(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
 		}
 		foundPodRef := false
 		for i := range refs {
@@ -895,21 +895,21 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 			}
 		}
 		if !foundPodRef {
-			t.Errorf(onPolicy("Claim ref unexpectedly changed: %v", refs))
+			t.Error(onPolicy("Claim ref unexpectedly changed: %v", refs))
 		}
 		if err = om.claimsIndexer.Delete(claim); err != nil {
-			t.Errorf(onPolicy("Could not delete stale pvc: %v", err))
+			t.Error(onPolicy("Could not delete stale pvc: %v", err))
 		}
 
 		if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to scale StatefulSet back up: %v", err))
+			t.Error(onPolicy("Failed to scale StatefulSet back up: %v", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 3 {
-			t.Errorf(onPolicy("Failed to scale set back up once PVC was deleted"))
+			t.Error(onPolicy("Failed to scale set back up once PVC was deleted"))
 		}
 	}
 }

@@ -235,7 +235,7 @@ func TestAssociations(t *testing.T) {
 				// Run consistency check after every operation.
 				err := consistencyCheck(multimap)
 				if err != nil {
-					t.Fatalf(err.Error())
+					t.Fatal(err.Error())
 				}
 			}
 			for _, expect := range tc.want {
@@ -261,7 +261,7 @@ func TestEfficientAssociation(t *testing.T) {
 
 	err := forwardSelect(key("hpa-1"), key("pod-1"), key("pod-2"))(m)
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 	}
 }

@@ -22,6 +22,7 @@ reference them.
 package cache
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -524,7 +525,7 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
 		// should not happen
 		errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist",
 			needed, nodeName)
-		return fmt.Errorf(errMsg)
+		return errors.New(errMsg)
 	}
 
 	nodeToUpdate.statusUpdateNeeded = needed

@@ -18,6 +18,7 @@ package expand
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"time"
@@ -28,7 +29,7 @@ import (
 
 	authenticationv1 "k8s.io/api/authentication/v1"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -205,7 +206,7 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
 		return err
 	}
 	pvc, err := expc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	logger := klog.FromContext(ctx)
@@ -256,14 +257,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
 		if err != nil {
 			errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", key, err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-			return fmt.Errorf(errorMsg)
+			return errors.New(errorMsg)
 		}

 		pvc, err := util.SetClaimResizer(pvc, csiResizerName, expc.kubeClient)
 		if err != nil {
 			errorMsg := fmt.Sprintf("error setting resizer annotation to pvc %s, with error %v", key, err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-			return fmt.Errorf(errorMsg)
+			return errors.New(errorMsg)
 		}
 		return nil
 	}

@@ -18,6 +18,7 @@ package persistentvolume
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 		strerr := fmt.Sprintf("plugin %q is not a CSI plugin. Only CSI plugin can provision a claim with a datasource", pluginName)
 		logger.V(2).Info(strerr)
 		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
-		return pluginName, fmt.Errorf(strerr)
+		return pluginName, errors.New(strerr)

 	}
 	provisionerName := storageClass.Provisioner
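A side effect of moving to errors.New is visible in the import hunks above: the standard-library errors package is added, and in the expand controller the existing k8s.io/apimachinery/pkg/api/errors import is renamed to apierrors (with its call site updated to apierrors.IsNotFound) so the two packages do not collide. A small sketch of that aliasing pattern, with an illustrative helper name; it assumes a module that already depends on k8s.io/apimachinery:

package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// classify is illustrative only: API-group error checks go through the alias,
// while plain error construction uses the standard library package.
func classify(err error) string {
	if apierrors.IsNotFound(err) {
		return "object is gone, nothing to sync"
	}
	return fmt.Sprintf("unexpected error: %v", err)
}

func main() {
	fmt.Println(classify(errors.New("boom")))
}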