Fix Go vet errors for master golang
Co-authored-by: Rajalakshmi-Girish <rajalakshmi.girish1@ibm.com>
Co-authored-by: Abhishek Kr Srivastav <Abhishek.kr.srivastav@ibm.com>
Committed by: Kishen Viswanathan
Parent: 7164c728c0
Commit: 9d10ddb060
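For context, the pattern behind these changes: newer go vet releases flag printf-style calls such as t.Errorf, fmt.Errorf, or klog.Errorf when the format argument is a non-constant string, since any stray % verb inside the value is treated as a formatting directive. The diff below either switches to the non-formatting variants (t.Error, errors.New, klog.Error, klog.Info) or passes the pre-built message through an explicit "%s" verb. A minimal sketch of both the warning and the two fixes, with invented names that are not part of the Kubernetes code touched here:

package main

import (
	"errors"
	"fmt"
)

// buildMsg stands in for the pre-formatted messages used throughout this commit.
func buildMsg(name string) string {
	return fmt.Sprintf("volume %q is 100%% full", name)
}

// bad passes a non-constant string as a format string; vet reports
// "non-constant format string in call to fmt.Errorf", and the "% f"
// inside the value is mangled at runtime.
func bad(name string) error {
	return fmt.Errorf(buildMsg(name))
}

// good shows the two replacements used in this commit: errors.New when no
// formatting is needed, or an explicit "%s" verb when the message still has
// to go through a printf-style API (setCondition, recordContainerEvent, ...).
func good(name string) error {
	msg := buildMsg(name)
	fmt.Printf("%s\n", msg) // forwards the message verbatim
	return errors.New(msg)
}

func main() {
	fmt.Println(bad("data"))  // volume "data" is 100%!f(MISSING)ull
	fmt.Println(good("data")) // volume "data" is 100% full (printed once, then returned as the error text)
}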
@@ -185,7 +185,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
defaults := detectDefaults(t, rc, reflect.ValueOf(template))
if !reflect.DeepEqual(expectedDefaults, defaults) {
t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
- t.Logf(cmp.Diff(expectedDefaults, defaults))
+ t.Log(cmp.Diff(expectedDefaults, defaults))
}
})
t.Run("hostnet PodTemplateSpec with ports", func(t *testing.T) {
@@ -223,7 +223,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
}()
if !reflect.DeepEqual(expected, defaults) {
t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
- t.Logf(cmp.Diff(expected, defaults))
+ t.Log(cmp.Diff(expected, defaults))
}
})
}
@@ -374,7 +374,7 @@ func testPodDefaults(t *testing.T, featuresEnabled bool) {
defaults := detectDefaults(t, pod, reflect.ValueOf(pod))
if !reflect.DeepEqual(expectedDefaults, defaults) {
t.Errorf("Defaults for PodSpec changed. This can cause spurious restarts of containers on API server upgrade.")
- t.Logf(cmp.Diff(expectedDefaults, defaults))
+ t.Log(cmp.Diff(expectedDefaults, defaults))
}
}
@@ -2782,7 +2782,7 @@ func waitForChanReceive(t *testing.T, timeout time.Duration, receivingChan chan
timer := time.NewTimer(timeout)
select {
case <-timer.C:
- t.Errorf(errorMsg)
+ t.Error(errorMsg)
case <-receivingChan:
}
}
@@ -2450,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
node2.Status = healthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2479,7 +2479,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
node3.Status = unhealthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2492,7 +2492,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
node3.Status.Conditions = overrideNodeNewStatusConditions
_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2638,7 +2638,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
node0.Status = healthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
@@ -2870,12 +2870,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
node1.Status = healthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
_, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
if err != nil {
- t.Errorf(err.Error())
+ t.Error(err.Error())
return
}
@@ -338,7 +338,7 @@ func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hp
// return an error and set the condition of the hpa based on the first invalid metric.
// Otherwise set the condition as scaling active as we're going to scale
if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
- setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
+ setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, "%s", invalidMetricCondition.Message)
return -1, "", statuses, time.Time{}, invalidMetricError
}
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
@@ -385,15 +385,15 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
- return nil, fmt.Errorf(errMsg)
+ return nil, errors.New(errMsg)
}

parsedSelector, err := labels.Parse(selector)
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
- setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
- return nil, fmt.Errorf(errMsg)
+ setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "%s", errMsg)
+ return nil, errors.New(errMsg)
}

hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -413,8 +413,8 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
if len(selectingHpas) > 1 {
errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
- setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
- return nil, fmt.Errorf(errMsg)
+ setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", "%s", errMsg)
+ return nil, errors.New(errMsg)
}

return parsedSelector, nil
@@ -570,7 +570,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
errMsg := "invalid object metric source: neither a value target nor an average value target was set"
- err = fmt.Errorf(errMsg)
+ err = errors.New(errMsg)
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
return 0, time.Time{}, "", condition, err
}
@@ -617,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context

if target.AverageUtilization == nil {
errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
- return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+ return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
}

targetUtilization := *target.AverageUtilization
@@ -719,9 +719,9 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
errMsg := "invalid external metric source: neither a value target nor an average value target was set"
- err = fmt.Errorf(errMsg)
+ err = errors.New(errMsg)
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
- return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+ return 0, time.Time{}, "", condition, errors.New(errMsg)
}

func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
@@ -950,12 +950,12 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
}

- desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
+ desiredReplicas, reason, message := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

if desiredReplicas == stabilizedRecommendation {
- setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
+ setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
} else {
- setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
+ setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
}

return desiredReplicas
@@ -991,15 +991,15 @@ func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autosc
normalizationArg.DesiredReplicas = stabilizedRecommendation
if stabilizedRecommendation != prenormalizedDesiredReplicas {
// "ScaleUpStabilized" || "ScaleDownStabilized"
- setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
+ setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, "%s", message)
} else {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
}
desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
if desiredReplicas == stabilizedRecommendation {
- setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
+ setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
} else {
- setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
+ setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
}

return desiredReplicas
@@ -82,7 +82,7 @@ func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCon
resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
copy(resv2, statusOk)
for _, override := range overrides {
- resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
+ resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, "%s", override.Message)
}

// copy to a v1 slice
@@ -824,35 +824,35 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
|
||||
logger, ctx := ktesting.NewTestContext(t)
|
||||
ssc, spc, om, _ := newFakeStatefulSetController(ctx, set)
|
||||
if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
|
||||
t.Errorf(onPolicy("Failed to turn up StatefulSet : %s", err))
|
||||
t.Error(onPolicy("Failed to turn up StatefulSet : %s", err))
|
||||
}
|
||||
var err error
|
||||
if set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name); err != nil {
|
||||
t.Errorf(onPolicy("Could not get scaled up set: %v", err))
|
||||
t.Error(onPolicy("Could not get scaled up set: %v", err))
|
||||
}
|
||||
if set.Status.Replicas != 3 {
|
||||
t.Errorf(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
|
||||
t.Error(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
|
||||
}
|
||||
*set.Spec.Replicas = 2
|
||||
if err := scaleDownStatefulSetController(logger, set, ssc, spc, om); err != nil {
|
||||
t.Errorf(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
|
||||
t.Error(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
|
||||
}
|
||||
set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
|
||||
if err != nil {
|
||||
t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
}
|
||||
if set.Status.Replicas != 2 {
|
||||
t.Errorf(onPolicy("Failed to scale statefulset to 2 replicas"))
|
||||
t.Error(onPolicy("Failed to scale statefulset to 2 replicas"))
|
||||
}
|
||||
|
||||
var claim *v1.PersistentVolumeClaim
|
||||
claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
|
||||
if err != nil {
|
||||
t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
|
||||
t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
|
||||
}
|
||||
refs := claim.GetOwnerReferences()
|
||||
if len(refs) != 1 {
|
||||
t.Errorf(onPolicy("Expected only one refs: %v", refs))
|
||||
t.Error(onPolicy("Expected only one refs: %v", refs))
|
||||
}
|
||||
// Make the pod ref stale.
|
||||
for i := range refs {
|
||||
@@ -863,29 +863,29 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
|
||||
}
|
||||
claim.SetOwnerReferences(refs)
|
||||
if err = om.claimsIndexer.Update(claim); err != nil {
|
||||
t.Errorf(onPolicy("Could not update claim with new owner ref: %v", err))
|
||||
t.Error(onPolicy("Could not update claim with new owner ref: %v", err))
|
||||
}
|
||||
|
||||
*set.Spec.Replicas = 3
|
||||
// Until the stale PVC goes away, the scale up should never finish. Run 10 iterations, then delete the PVC.
|
||||
if err := scaleUpStatefulSetControllerBounded(logger, set, ssc, spc, om, 10); err != nil {
|
||||
t.Errorf(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
|
||||
t.Error(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
|
||||
}
|
||||
set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
|
||||
if err != nil {
|
||||
t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
}
|
||||
if set.Status.Replicas != 2 {
|
||||
t.Errorf(onPolicy("Expected set to stay at two replicas"))
|
||||
t.Error(onPolicy("Expected set to stay at two replicas"))
|
||||
}
|
||||
|
||||
claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
|
||||
if err != nil {
|
||||
t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
|
||||
t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
|
||||
}
|
||||
refs = claim.GetOwnerReferences()
|
||||
if len(refs) != 1 {
|
||||
t.Errorf(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
|
||||
t.Error(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
|
||||
}
|
||||
foundPodRef := false
|
||||
for i := range refs {
|
||||
@@ -895,21 +895,21 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
if !foundPodRef {
|
||||
t.Errorf(onPolicy("Claim ref unexpectedly changed: %v", refs))
|
||||
t.Error(onPolicy("Claim ref unexpectedly changed: %v", refs))
|
||||
}
|
||||
if err = om.claimsIndexer.Delete(claim); err != nil {
|
||||
t.Errorf(onPolicy("Could not delete stale pvc: %v", err))
|
||||
t.Error(onPolicy("Could not delete stale pvc: %v", err))
|
||||
}
|
||||
|
||||
if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
|
||||
t.Errorf(onPolicy("Failed to scale StatefulSet back up: %v", err))
|
||||
t.Error(onPolicy("Failed to scale StatefulSet back up: %v", err))
|
||||
}
|
||||
set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
|
||||
if err != nil {
|
||||
t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
|
||||
}
|
||||
if set.Status.Replicas != 3 {
|
||||
t.Errorf(onPolicy("Failed to scale set back up once PVC was deleted"))
|
||||
t.Error(onPolicy("Failed to scale set back up once PVC was deleted"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,7 +235,7 @@ func TestAssociations(t *testing.T) {
|
||||
// Run consistency check after every operation.
|
||||
err := consistencyCheck(multimap)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
}
|
||||
for _, expect := range tc.want {
|
||||
@@ -261,7 +261,7 @@ func TestEfficientAssociation(t *testing.T) {
|
||||
|
||||
err := forwardSelect(key("hpa-1"), key("pod-1"), key("pod-2"))(m)
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
t.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ reference them.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -524,7 +525,7 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
|
||||
// should not happen
|
||||
errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist",
|
||||
needed, nodeName)
|
||||
return fmt.Errorf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
|
||||
nodeToUpdate.statusUpdateNeeded = needed
|
||||
|
||||
@@ -18,6 +18,7 @@ package expand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
@@ -28,7 +29,7 @@ import (
|
||||
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
@@ -205,7 +206,7 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
|
||||
return err
|
||||
}
|
||||
pvc, err := expc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
logger := klog.FromContext(ctx)
|
||||
@@ -256,14 +257,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
|
||||
if err != nil {
|
||||
errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", key, err)
|
||||
expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
|
||||
return fmt.Errorf(errorMsg)
|
||||
return errors.New(errorMsg)
|
||||
}
|
||||
|
||||
pvc, err := util.SetClaimResizer(pvc, csiResizerName, expc.kubeClient)
|
||||
if err != nil {
|
||||
errorMsg := fmt.Sprintf("error setting resizer annotation to pvc %s, with error %v", key, err)
|
||||
expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
|
||||
return fmt.Errorf(errorMsg)
|
||||
return errors.New(errorMsg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ package persistentvolume
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
@@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
|
||||
strerr := fmt.Sprintf("plugin %q is not a CSI plugin. Only CSI plugin can provision a claim with a datasource", pluginName)
|
||||
logger.V(2).Info(strerr)
|
||||
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
|
||||
return pluginName, fmt.Errorf(strerr)
|
||||
return pluginName, errors.New(strerr)
|
||||
|
||||
}
|
||||
provisionerName := storageClass.Provisioner
|
||||
|
||||
@@ -176,7 +176,7 @@ func LoadAndValidateData(data []byte, requireNonWebhookTypes sets.Set[authzconfi
|
||||
sets.NewString(modes.AuthorizationModeChoices...),
|
||||
sets.NewString(repeatableAuthorizerTypes...),
|
||||
); len(errors) != 0 {
|
||||
return nil, fmt.Errorf(errors.ToAggregate().Error())
|
||||
return nil, errors.ToAggregate()
|
||||
}
|
||||
|
||||
// test to check if the authorizer names passed conform to the authorizers for type!=Webhook
|
||||
|
||||
@@ -20,6 +20,7 @@ limitations under the License.
|
||||
package cm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -112,7 +113,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
||||
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
|
||||
message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
|
||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||
return fmt.Errorf(message)
|
||||
return errors.New(message)
|
||||
}
|
||||
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
|
||||
}
|
||||
@@ -121,7 +122,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
||||
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
|
||||
message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
|
||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||
return fmt.Errorf(message)
|
||||
return errors.New(message)
|
||||
}
|
||||
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
|
||||
}
|
||||
|
||||
@@ -274,7 +274,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
|
||||
return s.Message(), ErrPreStartHook
|
||||
}
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)
|
||||
|
||||
// Step 3: start the container.
|
||||
err = m.runtimeService.StartContainer(ctx, containerID)
|
||||
@@ -283,7 +283,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
|
||||
return s.Message(), kubecontainer.ErrRunContainer
|
||||
}
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, fmt.Sprintf("Started container %s", container.Name))
|
||||
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)
|
||||
|
||||
// Symlink container logs to the legacy container log location for cluster logging
|
||||
// support.
|
||||
@@ -780,7 +780,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
|
||||
if len(message) == 0 {
|
||||
message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
|
||||
}
|
||||
m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, message)
|
||||
m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)
|
||||
|
||||
if gracePeriodOverride != nil {
|
||||
gracePeriod = *gracePeriodOverride
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package nodeshutdown
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -60,7 +59,7 @@ func TestLocalStorage(t *testing.T) {
|
||||
return
|
||||
}
|
||||
nowStr := now.Format(time.RFC3339Nano)
|
||||
wantRaw := fmt.Sprintf(`{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`)
|
||||
wantRaw := `{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`
|
||||
if string(raw) != wantRaw {
|
||||
t.Errorf("got %s, want %s", string(raw), wantRaw)
|
||||
return
|
||||
|
||||
@@ -21,6 +21,7 @@ keep track of registered plugins.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -100,7 +101,7 @@ func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, d
|
||||
// that can be used in logs.
|
||||
// The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ",
|
||||
func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
|
||||
return fmt.Errorf(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
}
|
||||
|
||||
// GenerateError returns simple and detailed errors for plugins to register
|
||||
@@ -108,7 +109,7 @@ func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (de
|
||||
// The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ".
|
||||
func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
|
||||
simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
|
||||
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
|
||||
return errors.New(simpleMsg), errors.New(detailedMsg)
|
||||
}
|
||||
|
||||
// Generates an error string with the format ": <err>" if err exists
|
||||
|
||||
@@ -80,7 +80,7 @@ func TestTCPPortExhaustion(t *testing.T) {
|
||||
{"HTTP", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf(tt.name), func(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
testRootDir := ""
|
||||
if tempDir, err := os.MkdirTemp("", "kubelet_test."); err != nil {
|
||||
t.Fatalf("can't make a temp rootdir: %v", err)
|
||||
|
||||
@@ -19,6 +19,7 @@ package scheduler
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -287,7 +288,7 @@ func (h *HTTPExtender) Filter(
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if result.Error != "" {
|
||||
return nil, nil, nil, fmt.Errorf(result.Error)
|
||||
return nil, nil, nil, errors.New(result.Error)
|
||||
}
|
||||
|
||||
if h.nodeCacheCapable && result.NodeNames != nil {
|
||||
@@ -373,7 +374,7 @@ func (h *HTTPExtender) Bind(binding *v1.Binding) error {
|
||||
return err
|
||||
}
|
||||
if result.Error != "" {
|
||||
return fmt.Errorf(result.Error)
|
||||
return errors.New(result.Error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
pkg/scheduler/internal/cache/cache.go (vendored)
@@ -18,6 +18,7 @@ package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -272,7 +273,7 @@ func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapsho
|
||||
// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
|
||||
// error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
|
||||
cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
|
||||
return fmt.Errorf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -3766,7 +3766,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu
|
||||
// end with a period.
|
||||
func makePredicateError(failReason string) error {
|
||||
s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
|
||||
return fmt.Errorf(s)
|
||||
return errors.New(s)
|
||||
}
|
||||
|
||||
func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
|
||||
|
||||
@@ -269,7 +269,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
|
||||
}
|
||||
|
||||
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
|
||||
klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
|
||||
klog.V(4).Info(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
|
||||
|
||||
if deviceMountPath == "" {
|
||||
return errors.New(log("attacher.MountDevice failed, deviceMountPath is empty"))
|
||||
@@ -363,7 +363,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
// finished, we should remove the directory.
|
||||
if err != nil && volumetypes.IsOperationFinishedError(err) {
|
||||
// clean up metadata
|
||||
klog.Errorf(log("attacher.MountDevice failed: %v", err))
|
||||
klog.Error(log("attacher.MountDevice failed: %v", err))
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
|
||||
}
|
||||
@@ -377,7 +377,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
}
|
||||
|
||||
if !stageUnstageSet {
|
||||
klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
klog.Info(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
|
||||
return nil
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
|
||||
klog.V(4).Info(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -604,7 +604,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
|
||||
klog.Error(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -627,7 +627,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
return errors.New(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
|
||||
klog.Info(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
|
||||
// Just delete the global directory + json file
|
||||
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
|
||||
return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
|
||||
@@ -650,7 +650,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
|
||||
return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
|
||||
}
|
||||
|
||||
klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
|
||||
klog.V(4).Info(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -105,7 +105,7 @@ var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{}
|
||||
// Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev
|
||||
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||
dir := getVolumeDevicePluginDir(m.specName, m.plugin.host)
|
||||
klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
|
||||
klog.V(4).Info(log("blockMapper.GetGlobalMapPath = %s", dir))
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
@@ -137,7 +137,7 @@ func (m *csiBlockMapper) getPublishPath() string {
|
||||
// returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName}
|
||||
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
|
||||
path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName))
|
||||
klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
|
||||
klog.V(4).Info(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
|
||||
return path, m.specName
|
||||
}
|
||||
|
||||
@@ -149,10 +149,10 @@ func (m *csiBlockMapper) stageVolumeForBlock(
|
||||
csiSource *v1.CSIPersistentVolumeSource,
|
||||
attachment *storage.VolumeAttachment,
|
||||
) (string, error) {
|
||||
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
|
||||
klog.V(4).Info(log("blockMapper.stageVolumeForBlock called"))
|
||||
|
||||
stagingPath := m.GetStagingPath()
|
||||
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
|
||||
klog.V(4).Info(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
|
||||
|
||||
// Check whether "STAGE_UNSTAGE_VOLUME" is set
|
||||
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
|
||||
@@ -160,7 +160,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
|
||||
return "", errors.New(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
klog.Info(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
|
||||
return "", nil
|
||||
}
|
||||
publishVolumeInfo := map[string]string{}
|
||||
@@ -200,7 +200,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
|
||||
return "", err
|
||||
}
|
||||
|
||||
klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
|
||||
klog.V(4).Info(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
|
||||
return stagingPath, nil
|
||||
}
|
||||
|
||||
@@ -212,7 +212,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
|
||||
csiSource *v1.CSIPersistentVolumeSource,
|
||||
attachment *storage.VolumeAttachment,
|
||||
) (string, error) {
|
||||
klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
|
||||
klog.V(4).Info(log("blockMapper.publishVolumeForBlock called"))
|
||||
|
||||
publishVolumeInfo := map[string]string{}
|
||||
if attachment != nil {
|
||||
@@ -279,7 +279,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
|
||||
|
||||
// SetUpDevice ensures the device is attached returns path where the device is located.
|
||||
func (m *csiBlockMapper) SetUpDevice() (string, error) {
|
||||
klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
|
||||
klog.V(4).Info(log("blockMapper.SetUpDevice called"))
|
||||
|
||||
// Get csiSource from spec
|
||||
if m.spec == nil {
|
||||
@@ -341,7 +341,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
|
||||
}
|
||||
|
||||
func (m *csiBlockMapper) MapPodDevice() (string, error) {
|
||||
klog.V(4).Infof(log("blockMapper.MapPodDevice called"))
|
||||
klog.V(4).Info(log("blockMapper.MapPodDevice called"))
|
||||
|
||||
// Get csiSource from spec
|
||||
if m.spec == nil {
|
||||
@@ -408,7 +408,7 @@ func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiCli
|
||||
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
|
||||
return errors.New(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
|
||||
}
|
||||
klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
|
||||
klog.V(4).Info(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -421,7 +421,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
|
||||
return errors.New(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
|
||||
}
|
||||
if !stageUnstageSet {
|
||||
klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
|
||||
klog.Info(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -431,7 +431,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
|
||||
if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
|
||||
return errors.New(log("blockMapper.unstageVolumeForBlock failed: %v", err))
|
||||
}
|
||||
klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
|
||||
klog.V(4).Info(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
|
||||
|
||||
// Remove stagingPath directory and its contents
|
||||
if err := os.RemoveAll(stagingPath); err != nil {
|
||||
@@ -457,7 +457,7 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error
|
||||
stagingPath := m.GetStagingPath()
|
||||
if _, err := os.Stat(stagingPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
|
||||
klog.V(4).Info(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error {
|
||||
}
|
||||
|
||||
func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
|
||||
klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
|
||||
klog.V(4).Info(log("Mounter.SetUpAt(%s)", dir))
|
||||
|
||||
csi, err := c.csiClientGetter.Get()
|
||||
if err != nil {
|
||||
@@ -346,7 +346,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
|
||||
klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *mounterArgs.FsGroup, c.volumeID))
|
||||
}
|
||||
|
||||
klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
|
||||
klog.V(4).Info(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -358,7 +358,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
|
||||
csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.V(5).Infof(log("CSIDriver %q not found, not adding service account token information", c.driverName))
|
||||
klog.V(5).Info(log("CSIDriver %q not found, not adding service account token information", c.driverName))
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
@@ -394,7 +394,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
|
||||
outputs[audience] = tr.Status
|
||||
}
|
||||
|
||||
klog.V(4).Infof(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
|
||||
klog.V(4).Info(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
|
||||
tokens, _ := json.Marshal(outputs)
|
||||
return map[string]string{
|
||||
"csi.storage.k8s.io/serviceAccount.tokens": string(tokens),
|
||||
@@ -416,7 +416,7 @@ func (c *csiMountMgr) TearDown() error {
|
||||
return c.TearDownAt(c.GetPath())
|
||||
}
|
||||
func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
klog.V(4).Infof(log("Unmounter.TearDownAt(%s)", dir))
|
||||
klog.V(4).Info(log("Unmounter.TearDownAt(%s)", dir))
|
||||
|
||||
volID := c.volumeID
|
||||
csi, err := c.csiClientGetter.Get()
|
||||
@@ -447,7 +447,7 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
|
||||
if err := removeMountDir(c.plugin, dir); err != nil {
|
||||
return errors.New(log("Unmounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
|
||||
}
|
||||
klog.V(4).Infof(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
|
||||
klog.V(4).Info(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -97,7 +97,7 @@ var PluginHandler = &RegistrationHandler{}
|
||||
// ValidatePlugin is called by kubelet's plugin watcher upon detection
|
||||
// of a new registration socket opened by CSI Driver registrar side car.
|
||||
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
|
||||
klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
|
||||
klog.Info(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
|
||||
pluginName, endpoint, strings.Join(versions, ",")))
|
||||
|
||||
_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
|
||||
@@ -110,7 +110,7 @@ func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string,
|
||||
|
||||
// RegisterPlugin is called when a plugin can be registered
|
||||
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error {
|
||||
klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
|
||||
klog.Info(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
|
||||
|
||||
highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
|
||||
if err != nil {
|
||||
@@ -432,7 +432,7 @@ func (p *csiPlugin) NewMounter(
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
|
||||
klog.V(4).Info(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
|
||||
|
||||
kvh, ok := p.host.(volume.KubeletVolumeHost)
|
||||
if !ok {
|
||||
@@ -697,7 +697,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod) (vo
|
||||
}
|
||||
|
||||
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
|
||||
klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
|
||||
klog.V(4).Info(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
|
||||
unmapper := &csiBlockMapper{
|
||||
plugin: p,
|
||||
podUID: podUID,
|
||||
@@ -839,7 +839,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
|
||||
csiDriver, err := p.getCSIDriver(driverName)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", driverName))
|
||||
klog.V(4).Info(log("CSIDriver %q not found, not adding pod information", driverName))
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
@@ -847,7 +847,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
|
||||
|
||||
// if PodInfoOnMount is not set or false we do not set pod attributes
|
||||
if csiDriver.Spec.PodInfoOnMount == nil || *csiDriver.Spec.PodInfoOnMount == false {
|
||||
klog.V(4).Infof(log("CSIDriver %q does not require pod information", driverName))
|
||||
klog.V(4).Info(log("CSIDriver %q does not require pod information", driverName))
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
@@ -36,7 +36,7 @@ func (c *csiPlugin) RequiresFSResize() bool {
|
||||
}
|
||||
|
||||
func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
|
||||
klog.V(4).Infof(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
|
||||
klog.V(4).Info(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
|
||||
csiSource, err := getCSISourceFromSpec(resizeOptions.VolumeSpec)
|
||||
if err != nil {
|
||||
return false, errors.New(log("Expander.NodeExpand failed to get CSI persistent source: %v", err))
|
||||
|
||||
@@ -321,7 +321,7 @@ func (step stepName) getName() string { return step.name }
|
||||
func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) {
|
||||
data, err := os.ReadFile(filepath.Join(volumePath, filename))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
t.Error(err.Error())
|
||||
return
|
||||
}
|
||||
actualStr := string(data)
|
||||
@@ -357,7 +357,7 @@ type verifyMode struct {
|
||||
func (step verifyMode) run(test *downwardAPITest) {
|
||||
fileInfo, err := os.Stat(filepath.Join(test.volumePath, step.name))
|
||||
if err != nil {
|
||||
test.t.Errorf(err.Error())
|
||||
test.t.Error(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -266,7 +266,7 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
|
||||
return nil, errors.New(status.Status)
|
||||
} else if status.Status != StatusSuccess {
|
||||
errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
|
||||
klog.Errorf(errMsg)
|
||||
klog.Error(errMsg)
|
||||
return nil, fmt.Errorf("%s", errMsg)
|
||||
}
|
||||
|
||||
|
||||
@@ -366,7 +366,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
|
||||
kubeClient := host.GetKubeClient()
|
||||
if kubeClient == nil {
|
||||
err := fmt.Errorf("failed to get kubeclient when creating portworx client")
|
||||
klog.Errorf(err.Error())
|
||||
klog.Error(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -379,7 +379,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
|
||||
|
||||
if svc == nil {
|
||||
err = fmt.Errorf("service: %v not found. Consult Portworx docs to deploy it", pxServiceName)
|
||||
klog.Errorf(err.Error())
|
||||
klog.Error(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package secret
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
@@ -24,7 +25,7 @@ import (
|
||||
utilstrings "k8s.io/utils/strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@@ -184,7 +185,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs
|
||||
optional := b.source.Optional != nil && *b.source.Optional
|
||||
secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
|
||||
if err != nil {
|
||||
if !(errors.IsNotFound(err) && optional) {
|
||||
if !(apierrors.IsNotFound(err) && optional) {
|
||||
klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err)
|
||||
return err
|
||||
}
|
||||
@@ -276,8 +277,8 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32,
|
||||
continue
|
||||
}
|
||||
errMsg := fmt.Sprintf("references non-existent secret key: %s", ktp.Key)
|
||||
klog.Errorf(errMsg)
|
||||
return nil, fmt.Errorf(errMsg)
|
||||
klog.Error(errMsg)
|
||||
return nil, errors.New(errMsg)
|
||||
}
|
||||
|
||||
fileProjection.Data = []byte(content)
|
||||
|
||||
@@ -131,7 +131,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
|
||||
|
||||
if err != nil {
|
||||
msg := ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion in progress: %v", err)
|
||||
klog.Errorf(msg.Error())
|
||||
klog.Error(msg.Error())
|
||||
return false, err, testResponseData{}
|
||||
}
|
||||
}
|
||||
@@ -143,12 +143,12 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
|
||||
if volumetypes.IsInfeasibleError(resizeErr) || ne.markExpansionInfeasibleOnFailure {
|
||||
ne.pvc, markFailedError = util.MarkNodeExpansionInfeasible(ne.pvc, ne.kubeClient, resizeErr)
|
||||
if markFailedError != nil {
|
||||
klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
|
||||
klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
|
||||
}
|
||||
} else {
|
||||
ne.pvc, markFailedError = util.MarkNodeExpansionFailedCondition(ne.pvc, ne.kubeClient, resizeErr)
|
||||
if markFailedError != nil {
|
||||
klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
|
||||
klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -158,7 +158,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
|
||||
// expansion operation should not block mounting
|
||||
if volumetypes.IsFailedPreconditionError(resizeErr) {
|
||||
ne.actualStateOfWorld.MarkForInUseExpansionError(ne.vmt.VolumeName)
|
||||
klog.Errorf(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
|
||||
klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
|
||||
return false, nil, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
|
||||
}
|
||||
return false, resizeErr, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
|
||||
|
||||
@@ -370,13 +370,13 @@ func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
|
||||
|
||||
// GenerateErrorDetailed returns detailed errors for volumes to attach
|
||||
func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
|
||||
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
}
|
||||
|
||||
// GenerateError returns simple and detailed errors for volumes to attach
|
||||
func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
|
||||
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
|
||||
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
|
||||
return errors.New(simpleMsg), errors.New(detailedMsg)
|
||||
}
|
||||
|
||||
// String combines key fields of the volume for logging in text format.
|
||||
@@ -535,13 +535,13 @@ func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
|
||||
|
||||
// GenerateErrorDetailed returns detailed errors for volumes to mount
|
||||
func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
|
||||
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
}
|
||||
|
||||
// GenerateError returns simple and detailed errors for volumes to mount
|
||||
func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
|
||||
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
|
||||
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
|
||||
return errors.New(simpleMsg), errors.New(detailedMsg)
|
||||
}
|
||||
|
||||
// AttachedVolume represents a volume that is attached to a node.
|
||||
@@ -597,13 +597,13 @@ func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
|
||||
|
||||
// GenerateErrorDetailed returns detailed errors for attached volumes
|
||||
func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
|
||||
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
}
|
||||
|
||||
// GenerateError returns simple and detailed errors for attached volumes
|
||||
func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
|
||||
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
|
||||
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
|
||||
return errors.New(simpleMsg), errors.New(detailedMsg)
|
||||
}
|
||||
|
||||
// String combines key fields of the volume for logging in text format.
|
||||
@@ -769,13 +769,13 @@ func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
|
||||
|
||||
// GenerateErrorDetailed returns simple and detailed errors for mounted volumes
|
||||
func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
|
||||
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
|
||||
}
|
||||
|
||||
// GenerateError returns simple and detailed errors for mounted volumes
|
||||
func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
|
||||
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
|
||||
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
|
||||
return errors.New(simpleMsg), errors.New(detailedMsg)
|
||||
}
|
||||
|
||||
type operationExecutor struct {
|
||||
|
||||
@@ -197,7 +197,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec)
if err != nil || volumePlugin == nil {
klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
klog.Error(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
continue
}
volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()]
@@ -314,7 +314,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
for _, pod := range volumeToAttach.ScheduledPods {
og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg)
}
klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
klog.Info(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))

// Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
@@ -434,7 +434,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}

klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
klog.Info(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))

// Update actual state of world
actualStateOfWorld.MarkVolumeAsDetached(
@@ -647,7 +647,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
// volume tear down when pod is deleted, and also makes sure pod will not start using it.
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts); err != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
}
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
@@ -705,7 +705,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
// Only devices which were uncertain can be marked as unmounted
markDeviceUnmountError := actualStateOfWorld.MarkDeviceAsUnmounted(volumeToMount.VolumeName)
if markDeviceUnmountError != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
}
return
}
@@ -716,7 +716,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
// which was previously marked as mounted here as uncertain.
markDeviceUncertainError := actualStateOfWorld.MarkDeviceAsUncertain(volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel)
if markDeviceUncertainError != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
}
}

@@ -734,7 +734,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,

t := actualStateOfWorld.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName)
if t != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
}
return

@@ -744,7 +744,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,
actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeNotMounted {
t := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts)
if t != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
}
}
}
@@ -792,7 +792,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts)
if markMountUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountVolume will be re-tried shortly.
klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
}

// On failure, return error. Caller will log and retry.
@@ -815,7 +815,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolMountedErr != nil {
// On failure, just log and exit
klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
}

return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -866,7 +866,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
// If the mount path could not be found, don't fail the unmount, but instead log a warning and proceed,
// using the value from deviceToDetach.DeviceMountPath, so that the device can be marked as unmounted
deviceMountPath = deviceToDetach.DeviceMountPath
klog.Warningf(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
klog.Warning(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
"GetDeviceMountPath failed, but unmount operation will proceed using deviceMountPath=%s: %v", deviceMountPath, err), ""))
}
refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
@@ -885,7 +885,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
if markDeviceUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
}

// On failure, return error. Caller will log and retry.
@@ -906,7 +906,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
if markDeviceUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
}
eventErr, detailedErr := deviceToDetach.GenerateError(
"UnmountDevice failed",
@@ -1151,7 +1151,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
// volume tear down when pod is deleted, and also makes sure pod will not start using it.
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts); err != nil {
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
}
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
@@ -1270,7 +1270,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolUnmountedErr != nil {
// On failure, just log and exit
klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
}

return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -1384,7 +1384,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}

klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
klog.Info(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))

// Update actual state of world
markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
@@ -1519,7 +1519,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{})
if fetchErr != nil {
if errors.IsNotFound(fetchErr) {
klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
klog.Warning(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
return nil
}

@@ -1536,7 +1536,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
}

// Volume is not marked as in use by node
klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
klog.Info(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
return nil
}

@@ -1960,7 +1960,7 @@ func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
resizeDone, err := og.nodeExpandVolume(volumeToMount, actualStateOfWorld, resizeOptions)
if err != nil {
e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err)
klog.Errorf(e2.Error())
klog.Error(e2.Error())
return false, e1, e2
}
if resizeDone {
@@ -1991,7 +1991,7 @@ func (og *operationGenerator) expandVolumeDuringMount(volumeToMount VolumeToMoun
if pvcStatusCap.Cmp(pvSpecCap) < 0 {
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
klog.Warningf(detailedMsg)
klog.Warning(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, nil
@@ -2057,7 +2057,7 @@ func (og *operationGenerator) nodeExpandVolume(

if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
klog.Warningf(detailedMsg)
klog.Warning(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, nil
@@ -2097,7 +2097,7 @@ func (og *operationGenerator) checkForRecoveryFromExpansion(pvc *v1.PersistentVo
// and hence we are going to keep expanding using older logic.
if resizeStatus == "" && allocatedResource == nil {
_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume running with", "older external resize controller")
klog.Warningf(detailedMsg)
klog.Warning(detailedMsg)
return false
}
return true
@@ -2139,7 +2139,7 @@ func (og *operationGenerator) legacyCallNodeExpandOnPlugin(resizeOp nodeResizeOp
// expansion operation should not block mounting
if volumetypes.IsFailedPreconditionError(resizeErr) {
actualStateOfWorld.MarkForInUseExpansionError(volumeToMount.VolumeName)
klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
return true, nil
}
return false, resizeErr

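The operation_generator hunks above apply the same rule to klog: a message built by GenerateMsgDetailed or taken from err.Error() is already formatted, so it moves to the non-formatting variants klog.Error, klog.Warning, and klog.Info. A rough sketch of the before/after, assuming k8s.io/klog/v2 is on the module path (the message literal is illustrative only):

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// detailedMsg stands in for GenerateMsgDetailed output; volume names or
	// wrapped errors can legitimately contain '%' characters.
	detailedMsg := fmt.Sprintf("AttachVolume.Attach succeeded for volume %q", "vol-1 (50% full)")

	// Before: klog.Infof(detailedMsg) is flagged by `go vet` because the
	// format string is not a constant.
	// After: the non-formatting variant logs the string verbatim.
	klog.Info(detailedMsg)
	klog.Flush()
}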
@@ -18,11 +18,12 @@ package recyclerclient

import (
"context"
"errors"
"fmt"
"sync"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
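This import hunk is why the header reads -18,11 +18,12: the standard library errors package is added for errors.New, and the apimachinery errors package is re-imported under the apierrors alias so the two names no longer collide. A small sketch of the resulting usage (the package and function names here are illustrative, not the actual recyclerclient code):

// Illustrative only; not the actual recyclerclient file.
package recyclersketch

import (
	"errors"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// errorFromStatusMessage wraps an already-formatted pod status message.
func errorFromStatusMessage(message string) error {
	return errors.New(message)
}

// isAlreadyExists keeps using the apimachinery helper under its new alias.
func isAlreadyExists(err error) bool {
	return apierrors.IsAlreadyExists(err)
}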
@@ -72,7 +73,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
if apierrors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
@@ -128,7 +129,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
return errors.New(pod.Status.Message)
}
return fmt.Errorf("pod failed, pod.Status.Message unknown")
}

@@ -259,7 +259,7 @@ func TestSafeMakeDir(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
base, err := ioutil.TempDir("", "safe-make-dir-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
defer os.RemoveAll(base)
test.prepare(base)
@@ -385,7 +385,7 @@ func TestRemoveEmptyDirs(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "remove-empty-dirs-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
if err = test.prepare(base); err != nil {
os.RemoveAll(base)
@@ -615,7 +615,7 @@ func TestCleanSubPaths(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "clean-subpaths-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
mounts, err := test.prepare(base)
if err != nil {
@@ -872,7 +872,7 @@ func TestBindSubPath(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}

mounts, volPath, subPath, err := test.prepare(base)
@@ -986,7 +986,7 @@ func TestSubpath_PrepareSafeSubpath(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
defer os.RemoveAll(base)

@@ -1220,7 +1220,7 @@ func TestSafeOpen(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "safe-open-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}

test.prepare(base)
@@ -1367,7 +1367,7 @@ func TestFindExistingPrefix(t *testing.T) {
klog.V(4).Infof("test %q", test.name)
base, err := ioutil.TempDir("", "find-prefix-"+test.name+"-")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
test.prepare(base)
path := filepath.Join(base, test.path)