From 9d10ddb0608aa20ce287c89be879f888f5823cf9 Mon Sep 17 00:00:00 2001
From: Abhishek Kr Srivastav
Date: Thu, 12 Sep 2024 18:15:22 +0530
Subject: [PATCH] Fix go vet errors reported by the Go master toolchain

Co-authored-by: Rajalakshmi-Girish
Co-authored-by: Abhishek Kr Srivastav
---
 .../phases/certs/renewal/readwriter_test.go   |  3 +-
 pkg/apis/core/v1/defaults_test.go             |  6 +--
 .../endpoint/endpoints_controller_test.go     |  2 +-
 .../node_lifecycle_controller_test.go         | 12 ++---
 pkg/controller/podautoscaler/horizontal.go    | 32 ++++++-------
 .../podautoscaler/horizontal_test.go          |  2 +-
 .../statefulset/stateful_set_test.go          | 38 +++++++--------
 .../util/selectors/bimultimap_test.go         |  4 +-
 .../cache/actual_state_of_world.go            |  3 +-
 .../volume/expand/expand_controller.go        |  9 ++--
 .../volume/persistentvolume/pv_controller.go  |  3 +-
 pkg/kubeapiserver/authorizer/config.go        |  2 +-
 .../cm/node_container_manager_linux.go        |  5 +-
 .../kuberuntime/kuberuntime_container.go      |  6 +--
 pkg/kubelet/nodeshutdown/storage_test.go      |  3 +-
 .../cache/desired_state_of_world.go           |  5 +-
 pkg/kubelet/prober/scale_test.go              |  2 +-
 pkg/scheduler/extender.go                     |  5 +-
 pkg/scheduler/internal/cache/cache.go         |  3 +-
 pkg/scheduler/schedule_one_test.go            |  2 +-
 pkg/volume/csi/csi_attacher.go                | 14 +++---
 pkg/volume/csi/csi_block.go                   | 26 +++++------
 pkg/volume/csi/csi_mounter.go                 | 12 ++---
 pkg/volume/csi/csi_plugin.go                  | 12 ++---
 pkg/volume/csi/expander.go                    |  2 +-
 pkg/volume/downwardapi/downwardapi_test.go    |  4 +-
 pkg/volume/flexvolume/driver-call.go          |  2 +-
 pkg/volume/portworx/portworx_util.go          |  4 +-
 pkg/volume/secret/secret.go                   |  9 ++--
 .../util/operationexecutor/node_expander.go   |  8 ++--
 .../operationexecutor/operation_executor.go   | 16 +++----
 .../operationexecutor/operation_generator.go  | 46 +++++++++----------
 .../util/recyclerclient/recycler_client.go    |  7 +--
 pkg/volume/util/subpath/subpath_linux_test.go | 14 +++---
 .../apiserver/validation/validation_test.go   |  2 +-
 .../apiserver/pkg/cel/lazy/lazy_test.go       |  2 +-
 .../apiserver/pkg/endpoints/apiserver_test.go |  6 +--
 .../handlers/responsewriters/writers_test.go  |  2 +-
 .../pkg/server/filters/maxinflight.go         |  2 +-
 .../storage/cacher/cacher_whitebox_test.go    |  2 +-
 .../pkg/util/peerproxy/peerproxy_handler.go   |  2 +-
 .../k8s.io/cli-runtime/pkg/printers/json.go   |  5 +-
 .../cli-runtime/pkg/printers/jsonpath.go      |  3 +-
 .../k8s.io/cli-runtime/pkg/printers/name.go   |  3 +-
 .../cli-runtime/pkg/printers/template.go      |  3 +-
 .../k8s.io/cli-runtime/pkg/printers/yaml.go   |  5 +-
 .../cli-runtime/pkg/resource/builder.go       |  4 +-
 .../cli-runtime/pkg/resource/builder_test.go  |  2 +-
 .../src/k8s.io/client-go/rest/client_test.go  |  2 +-
 .../client-go/tools/remotecommand/v4.go       |  2 +-
 .../k8s.io/client-go/transport/cache_test.go  |  2 +-
 .../src/k8s.io/cloud-provider/app/webhooks.go |  2 +-
 .../controllers/route/route_controller.go     |  2 +-
 .../k8s.io/cri-client/pkg/logs/logs_test.go   |  2 +-
 .../k8s.io/kubectl/pkg/cmd/attach/attach.go   |  2 +-
 .../src/k8s.io/kubectl/pkg/cmd/cp/cp_test.go  |  2 +-
 .../kubectl/pkg/cmd/create/create_service.go  |  5 +-
 .../k8s.io/kubectl/pkg/cmd/delete/delete.go   |  2 +-
 .../src/k8s.io/kubectl/pkg/cmd/diff/diff.go   |  2 +-
 .../src/k8s.io/kubectl/pkg/cmd/drain/drain.go |  2 +-
 .../src/k8s.io/kubectl/pkg/cmd/exec/exec.go   |  2 +-
 .../kubectl/pkg/cmd/get/customcolumn.go       |  5 +-
 staging/src/k8s.io/kubectl/pkg/cmd/get/get.go |  2 +-
 .../pkg/cmd/portforward/portforward.go        |  2 +-
 .../src/k8s.io/kubectl/pkg/cmd/taint/taint.go |  2 +-
 .../k8s.io/kubectl/pkg/cmd/util/helpers.go    |  2 +-
 .../kubectl/pkg/describe/describe_test.go     |  8 ++--
 .../pkg/util/templates/help_flags_printer.go  |  2 +-
 staging/src/k8s.io/mount-utils/mount_linux.go |  4 +-
 test/e2e/apimachinery/aggregator.go           |  6 +--
 test/e2e/apimachinery/garbage_collector.go    |  2 +-
 test/e2e/apps/statefulset.go                  | 12 ++---
 .../e2e/cloud/gcp/common/upgrade_mechanics.go |  2 +-
 test/e2e/cloud/gcp/ha_master.go               |  8 ++--
 .../debug/resource_usage_gatherer.go          |  3 +-
 test/e2e/framework/flake_reporting_util.go    |  2 +-
 test/e2e/framework/framework.go               |  4 +-
 test/e2e/framework/ingress/ingress_utils.go   |  4 +-
 test/e2e/framework/network/utils.go           |  6 +--
 test/e2e/framework/node/resource.go           |  4 +-
 test/e2e/framework/skipper/skipper.go         |  5 +-
 test/e2e/framework/statefulset/rest.go        |  4 +-
 test/e2e/kubectl/kubectl.go                   | 14 +++---
 test/e2e/network/dns_common.go                |  3 +-
 test/e2e/network/proxy.go                     |  2 +-
 test/e2e/network/service.go                   |  2 +-
 test/e2e/network/service_latency.go           |  4 +-
 test/e2e/network/util.go                      |  2 +-
 test/e2e/scheduling/limit_range.go            |  2 +-
 .../csi-test/mock/service/controller.go       | 42 ++++++++---------
 .../drivers/csi-test/mock/service/node.go     | 24 +++++-----
 test/e2e/storage/testsuites/multivolume.go    |  2 +-
 test/e2e/storage/testsuites/volumelimits.go   |  3 +-
 test/e2e/storage/volume_provisioning.go       |  4 +-
 test/e2e/upgrades/apps/cassandra.go           |  6 ++-
 test/e2e/upgrades/apps/etcd.go                |  5 +-
 test/e2e/upgrades/apps/mysql.go               |  5 +-
 test/e2e/windows/gmsa_full.go                 | 12 ++---
 test/e2e_node/system_node_critical_test.go    |  6 +--
 test/e2e_node/util_sriov.go                   |  4 +-
 .../converter/framework.go                    |  8 ++--
 test/images/agnhost/net/main.go               |  2 +-
 test/images/agnhost/netexec/netexec.go        |  2 +-
 test/instrumentation/decode_metric.go         |  4 +-
 test/integration/dualstack/dualstack_test.go  |  3 +-
 test/integration/framework/util.go            |  2 +-
 .../garbage_collector_test.go                 |  2 +-
 .../integration/servicecidr/allocator_test.go |  2 +-
 test/utils/density_utils.go                   |  3 +-
 test/utils/deployment.go                      |  3 +-
 test/utils/runners.go                         |  6 +--
 111 files changed, 345 insertions(+), 318 deletions(-)
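The diffs below all address one family of vet diagnostics: the printf analyzer
in the Go toolchain at master (shipped with Go 1.24) reports calls such as
fmt.Errorf(msg), t.Errorf(msg), or klog.Infof(msg) where msg is a non-constant
string and no further arguments are passed, because a stray '%' inside msg
would be misread as a formatting directive. A minimal sketch of the
before/after pattern follows; the function and variable names here are
illustrative only and do not appear in the patch:

	package p

	import (
		"errors"
		"fmt"
	)

	func wrap(key string, err error) error {
		errMsg := fmt.Sprintf("pvc %s: %v", key, err) // non-constant string
		// Before: `go vet` reports "non-constant format string in call to fmt.Errorf".
		//	return fmt.Errorf(errMsg)

		// After, option 1: treat the message as opaque text.
		return errors.New(errMsg)
		// After, option 2: keep the printf-style call, but with a constant format string.
		//	return fmt.Errorf("%s", errMsg)
	}

The same reasoning drives the t.Errorf -> t.Error, klog.Infof -> klog.Info,
and klog.Errorf -> klog.Error rewrites, and the explicit "%s"/"%v" format
arguments added to printf-style wrappers such as setCondition and
recordContainerEvent.
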
diff --git a/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go b/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go
index c82ba36f9b9..4bbcac38add 100644
--- a/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go
+++ b/cmd/kubeadm/app/phases/certs/renewal/readwriter_test.go
@@ -19,7 +19,6 @@ package renewal
 import (
 	"crypto"
 	"crypto/x509"
-	"fmt"
 	"net"
 	"os"
 	"path/filepath"
@@ -265,7 +264,7 @@ func TestPKICertificateReadWriterExists(t *testing.T) {
 		}
 	}()
 	filename := "testfile"
-	tmpfilepath := filepath.Join(tmpdir, fmt.Sprintf(filename+".crt"))
+	tmpfilepath := filepath.Join(tmpdir, filename+".crt")
 	err = os.WriteFile(tmpfilepath, nil, 0644)
 	if err != nil {
 		t.Fatalf("Couldn't write file: %v", err)
diff --git a/pkg/apis/core/v1/defaults_test.go b/pkg/apis/core/v1/defaults_test.go
index 734fd2f2a38..8ad20568720 100644
--- a/pkg/apis/core/v1/defaults_test.go
+++ b/pkg/apis/core/v1/defaults_test.go
@@ -185,7 +185,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
 		defaults := detectDefaults(t, rc, reflect.ValueOf(template))
 		if !reflect.DeepEqual(expectedDefaults, defaults) {
 			t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-			t.Logf(cmp.Diff(expectedDefaults, defaults))
+			t.Log(cmp.Diff(expectedDefaults, defaults))
 		}
 	})
 	t.Run("hostnet PodTemplateSpec with ports", func(t *testing.T) {
@@ -223,7 +223,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
 		}()
 		if !reflect.DeepEqual(expected, defaults) {
 			t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-			t.Logf(cmp.Diff(expected, defaults))
+			t.Log(cmp.Diff(expected, defaults))
 		}
 	})
 }
@@ -374,7 +374,7 @@ func testPodDefaults(t *testing.T, featuresEnabled bool) {
 	defaults := detectDefaults(t, pod, reflect.ValueOf(pod))
 	if !reflect.DeepEqual(expectedDefaults, defaults) {
 		t.Errorf("Defaults for PodSpec changed. This can cause spurious restarts of containers on API server upgrade.")
-		t.Logf(cmp.Diff(expectedDefaults, defaults))
+		t.Log(cmp.Diff(expectedDefaults, defaults))
 	}
 }
diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go
index d7004245799..4147d74b2e4 100644
--- a/pkg/controller/endpoint/endpoints_controller_test.go
+++ b/pkg/controller/endpoint/endpoints_controller_test.go
@@ -2782,7 +2782,7 @@ func waitForChanReceive(t *testing.T, timeout time.Duration, receivingChan chan
 	timer := time.NewTimer(timeout)
 	select {
 	case <-timer.C:
-		t.Errorf(errorMsg)
+		t.Error(errorMsg)
 	case <-receivingChan:
 	}
 }
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
index 2d348d29dc2..4335f83bd72 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -2450,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node2.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2479,7 +2479,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node3.Status = unhealthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2492,7 +2492,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	node3.Status.Conditions = overrideNodeNewStatusConditions
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2638,7 +2638,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 	node0.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}

@@ -2870,12 +2870,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 	node1.Status = healthyNodeNewStatus
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	_, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index 425bfe63987..758c0f1993a 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -338,7 +338,7 @@ func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hp
 	// return an error and set the condition of the hpa based on the first invalid metric.
 	// Otherwise set the condition as scaling active as we're going to scale
 	if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
-		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
+		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, "%s", invalidMetricCondition.Message)
 		return -1, "", statuses, time.Time{}, invalidMetricError
 	}
 	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
@@ -385,15 +385,15 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
 		errMsg := "selector is required"
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
 		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
-		return nil, fmt.Errorf(errMsg)
+		return nil, errors.New(errMsg)
 	}

 	parsedSelector, err := labels.Parse(selector)
 	if err != nil {
 		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
-		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "%s", errMsg)
+		return nil, errors.New(errMsg)
 	}

 	hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -413,8 +413,8 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
 	if len(selectingHpas) > 1 {
 		errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
-		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
-		return nil, fmt.Errorf(errMsg)
+		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", "%s", errMsg)
+		return nil, errors.New(errMsg)
 	}

 	return parsedSelector, nil
@@ -570,7 +570,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
 	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-	err = fmt.Errorf(errMsg)
+	err = errors.New(errMsg)
 	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
 	return 0, time.Time{}, "", condition, err
 }
@@ -617,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context

 	if target.AverageUtilization == nil {
 		errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
-		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+		return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
 	}
 	targetUtilization := *target.AverageUtilization
@@ -719,9 +719,9 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
 	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
-	err = fmt.Errorf(errMsg)
+	err = errors.New(errMsg)
 	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
-	return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+	return 0, time.Time{}, "", condition, errors.New(errMsg)
 }

 func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
@@ -950,12 +950,12 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
 	}

-	desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
+	desiredReplicas, reason, message := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

 	if desiredReplicas == stabilizedRecommendation {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
 	} else {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
 	}

 	return desiredReplicas
@@ -991,15 +991,15 @@ func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autosc
 	normalizationArg.DesiredReplicas = stabilizedRecommendation
 	if stabilizedRecommendation != prenormalizedDesiredReplicas {
 		// "ScaleUpStabilized" || "ScaleDownStabilized"
-		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
+		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, "%s", message)
 	} else {
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
 	}
 	desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
 	if desiredReplicas == stabilizedRecommendation {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
 	} else {
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
 	}

 	return desiredReplicas
diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index 14883028b50..65184b95bdc 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -82,7 +82,7 @@ func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCon
 	resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
 	copy(resv2, statusOk)
 	for _, override := range overrides {
-		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
+		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, "%s", override.Message)
 	}

 	// copy to a v1 slice
diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go
index c40766b3907..8e834bd5e2b 100644
--- a/pkg/controller/statefulset/stateful_set_test.go
+++ b/pkg/controller/statefulset/stateful_set_test.go
@@ -824,35 +824,35 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 		logger, ctx := ktesting.NewTestContext(t)
 		ssc, spc, om, _ := newFakeStatefulSetController(ctx, set)
 		if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to turn up StatefulSet : %s", err))
+			t.Error(onPolicy("Failed to turn up StatefulSet : %s", err))
 		}
 		var err error
 		if set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name); err != nil {
-			t.Errorf(onPolicy("Could not get scaled up set: %v", err))
+			t.Error(onPolicy("Could not get scaled up set: %v", err))
 		}
 		if set.Status.Replicas != 3 {
-			t.Errorf(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
+			t.Error(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
 		}
 		*set.Spec.Replicas = 2
 		if err := scaleDownStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
+			t.Error(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 2 {
-			t.Errorf(onPolicy("Failed to scale statefulset to 2 replicas"))
+			t.Error(onPolicy("Failed to scale statefulset to 2 replicas"))
 		}

 		var claim *v1.PersistentVolumeClaim
 		claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
 		if err != nil {
-			t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+			t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
 		}
 		refs := claim.GetOwnerReferences()
 		if len(refs) != 1 {
-			t.Errorf(onPolicy("Expected only one refs: %v", refs))
+			t.Error(onPolicy("Expected only one refs: %v", refs))
 		}
 		// Make the pod ref stale.
 		for i := range refs {
@@ -863,29 +863,29 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 		}
 		claim.SetOwnerReferences(refs)
 		if err = om.claimsIndexer.Update(claim); err != nil {
-			t.Errorf(onPolicy("Could not update claim with new owner ref: %v", err))
+			t.Error(onPolicy("Could not update claim with new owner ref: %v", err))
 		}
 		*set.Spec.Replicas = 3
 		// Until the stale PVC goes away, the scale up should never finish. Run 10 iterations, then delete the PVC.
 		if err := scaleUpStatefulSetControllerBounded(logger, set, ssc, spc, om, 10); err != nil {
-			t.Errorf(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
+			t.Error(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 2 {
-			t.Errorf(onPolicy("Expected set to stay at two replicas"))
+			t.Error(onPolicy("Expected set to stay at two replicas"))
 		}

 		claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
 		if err != nil {
-			t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+			t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
 		}
 		refs = claim.GetOwnerReferences()
 		if len(refs) != 1 {
-			t.Errorf(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
+			t.Error(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
 		}
 		foundPodRef := false
 		for i := range refs {
@@ -895,21 +895,21 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
 			}
 		}
 		if !foundPodRef {
-			t.Errorf(onPolicy("Claim ref unexpectedly changed: %v", refs))
+			t.Error(onPolicy("Claim ref unexpectedly changed: %v", refs))
 		}
 		if err = om.claimsIndexer.Delete(claim); err != nil {
-			t.Errorf(onPolicy("Could not delete stale pvc: %v", err))
+			t.Error(onPolicy("Could not delete stale pvc: %v", err))
 		}
 		if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-			t.Errorf(onPolicy("Failed to scale StatefulSet back up: %v", err))
+			t.Error(onPolicy("Failed to scale StatefulSet back up: %v", err))
 		}
 		set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 		if err != nil {
-			t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+			t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
 		}
 		if set.Status.Replicas != 3 {
-			t.Errorf(onPolicy("Failed to scale set back up once PVC was deleted"))
+			t.Error(onPolicy("Failed to scale set back up once PVC was deleted"))
 		}
 	}
 }
diff --git a/pkg/controller/util/selectors/bimultimap_test.go b/pkg/controller/util/selectors/bimultimap_test.go
index e0d23166ef0..a3eeed04165 100644
--- a/pkg/controller/util/selectors/bimultimap_test.go
+++ b/pkg/controller/util/selectors/bimultimap_test.go
@@ -235,7 +235,7 @@ func TestAssociations(t *testing.T) {
 				// Run consistency check after every operation.
 				err := consistencyCheck(multimap)
 				if err != nil {
-					t.Fatalf(err.Error())
+					t.Fatal(err.Error())
 				}
 			}
 			for _, expect := range tc.want {
@@ -261,7 +261,7 @@ func TestEfficientAssociation(t *testing.T) {
 	err := forwardSelect(key("hpa-1"), key("pod-1"), key("pod-2"))(m)
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 	}
 }
diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
index cd4224d6e4f..b5188e0c01d 100644
--- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
+++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
@@ -22,6 +22,7 @@ reference them.
 package cache

 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -524,7 +525,7 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
 		// should not happen
 		errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist",
 			needed, nodeName)
-		return fmt.Errorf(errMsg)
+		return errors.New(errMsg)
 	}

 	nodeToUpdate.statusUpdateNeeded = needed
diff --git a/pkg/controller/volume/expand/expand_controller.go b/pkg/controller/volume/expand/expand_controller.go
index 6019f173787..f4ae20bb48c 100644
--- a/pkg/controller/volume/expand/expand_controller.go
+++ b/pkg/controller/volume/expand/expand_controller.go
@@ -18,6 +18,7 @@ package expand

 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"time"
@@ -28,7 +29,7 @@ import (

 	authenticationv1 "k8s.io/api/authentication/v1"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -205,7 +206,7 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
 		return err
 	}
 	pvc, err := expc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	logger := klog.FromContext(ctx)
@@ -256,14 +257,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
 		if err != nil {
 			errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", key, err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-			return fmt.Errorf(errorMsg)
+			return errors.New(errorMsg)
 		}

 		pvc, err := util.SetClaimResizer(pvc, csiResizerName, expc.kubeClient)
 		if err != nil {
 			errorMsg := fmt.Sprintf("error setting resizer annotation to pvc %s, with error %v", key, err)
 			expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-			return fmt.Errorf(errorMsg)
+			return errors.New(errorMsg)
 		}
 		return nil
 	}
diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go
index 9020966eaff..70b593ae955 100644
--- a/pkg/controller/volume/persistentvolume/pv_controller.go
+++ b/pkg/controller/volume/persistentvolume/pv_controller.go
@@ -18,6 +18,7 @@ package persistentvolume

 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 		strerr := fmt.Sprintf("plugin %q is not a CSI plugin. Only CSI plugin can provision a claim with a datasource", pluginName)
 		logger.V(2).Info(strerr)
 		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
-		return pluginName, fmt.Errorf(strerr)
+		return pluginName, errors.New(strerr)
 	}
 	provisionerName := storageClass.Provisioner
diff --git a/pkg/kubeapiserver/authorizer/config.go b/pkg/kubeapiserver/authorizer/config.go
index e654ed317f6..eec90bb451e 100644
--- a/pkg/kubeapiserver/authorizer/config.go
+++ b/pkg/kubeapiserver/authorizer/config.go
@@ -176,7 +176,7 @@ func LoadAndValidateData(data []byte, requireNonWebhookTypes sets.Set[authzconfi
 		sets.NewString(modes.AuthorizationModeChoices...),
 		sets.NewString(repeatableAuthorizerTypes...),
 	); len(errors) != 0 {
-		return nil, fmt.Errorf(errors.ToAggregate().Error())
+		return nil, errors.ToAggregate()
 	}

 	// test to check if the authorizer names passed conform to the authorizers for type!=Webhook
diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go
index c9f013ad663..a069b1942fc 100644
--- a/pkg/kubelet/cm/node_container_manager_linux.go
+++ b/pkg/kubelet/cm/node_container_manager_linux.go
@@ -20,6 +20,7 @@ limitations under the License.
 package cm

 import (
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -112,7 +113,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return fmt.Errorf(message)
+			return errors.New(message)
 		}
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
@@ -121,7 +122,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return fmt.Errorf(message)
+			return errors.New(message)
 		}
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
 	}
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go
index 6a891d8f42e..2a1ef3d15cd 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -274,7 +274,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
 		m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
 		return s.Message(), ErrPreStartHook
 	}
-	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))
+	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)

 	// Step 3: start the container.
 	err = m.runtimeService.StartContainer(ctx, containerID)
@@ -283,7 +283,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
 		m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
 		return s.Message(), kubecontainer.ErrRunContainer
 	}
-	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, fmt.Sprintf("Started container %s", container.Name))
+	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)

 	// Symlink container logs to the legacy container log location for cluster logging
 	// support.
@@ -780,7 +780,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
 	if len(message) == 0 {
 		message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
 	}
-	m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, message)
+	m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)

 	if gracePeriodOverride != nil {
 		gracePeriod = *gracePeriodOverride
diff --git a/pkg/kubelet/nodeshutdown/storage_test.go b/pkg/kubelet/nodeshutdown/storage_test.go
index 3ad3e39fb14..7adbdaebe97 100644
--- a/pkg/kubelet/nodeshutdown/storage_test.go
+++ b/pkg/kubelet/nodeshutdown/storage_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package nodeshutdown

 import (
-	"fmt"
 	"os"
 	"path/filepath"
 	"testing"
@@ -60,7 +59,7 @@ func TestLocalStorage(t *testing.T) {
 		return
 	}
 	nowStr := now.Format(time.RFC3339Nano)
-	wantRaw := fmt.Sprintf(`{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`)
+	wantRaw := `{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`
 	if string(raw) != wantRaw {
 		t.Errorf("got %s, want %s", string(raw), wantRaw)
 		return
diff --git a/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go b/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
index 46d1c7d7653..c0456e42fc0 100644
--- a/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
+++ b/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
@@ -21,6 +21,7 @@ keep track of registered plugins.
 package cache

 import (
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -100,7 +101,7 @@ func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, d
 // that can be used in logs.
 // The msg format follows the pattern " : ",
 func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }

 // GenerateError returns simple and detailed errors for plugins to register
@@ -108,7 +109,7 @@ func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (de
 // The msg format follows the pattern " : ".
 func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }

 // Generates an error string with the format ": " if err exists
diff --git a/pkg/kubelet/prober/scale_test.go b/pkg/kubelet/prober/scale_test.go
index 0b8b003d6f3..6213189a33a 100644
--- a/pkg/kubelet/prober/scale_test.go
+++ b/pkg/kubelet/prober/scale_test.go
@@ -80,7 +80,7 @@ func TestTCPPortExhaustion(t *testing.T) {
 		{"HTTP", true},
 	}
 	for _, tt := range tests {
-		t.Run(fmt.Sprintf(tt.name), func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			testRootDir := ""
 			if tempDir, err := os.MkdirTemp("", "kubelet_test."); err != nil {
 				t.Fatalf("can't make a temp rootdir: %v", err)
diff --git a/pkg/scheduler/extender.go b/pkg/scheduler/extender.go
index bc33aaafb1c..2a4866c4df1 100644
--- a/pkg/scheduler/extender.go
+++ b/pkg/scheduler/extender.go
@@ -19,6 +19,7 @@ package scheduler
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"strings"
@@ -287,7 +288,7 @@ func (h *HTTPExtender) Filter(
 		return nil, nil, nil, err
 	}
 	if result.Error != "" {
-		return nil, nil, nil, fmt.Errorf(result.Error)
+		return nil, nil, nil, errors.New(result.Error)
 	}

 	if h.nodeCacheCapable && result.NodeNames != nil {
@@ -373,7 +374,7 @@ func (h *HTTPExtender) Bind(binding *v1.Binding) error {
 		return err
 	}
 	if result.Error != "" {
-		return fmt.Errorf(result.Error)
+		return errors.New(result.Error)
 	}
 	return nil
 }
diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go
index 97eaa6936e6..99736448115 100644
--- a/pkg/scheduler/internal/cache/cache.go
+++ b/pkg/scheduler/internal/cache/cache.go
@@ -18,6 +18,7 @@ package cache

 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 	"time"
@@ -272,7 +273,7 @@ func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapsho
 		// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
 		// error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
 		cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
-		return fmt.Errorf(errMsg)
+		return errors.New(errMsg)
 	}

 	return nil
diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go
index 4289bb5c96f..88aa56f276d 100644
--- a/pkg/scheduler/schedule_one_test.go
+++ b/pkg/scheduler/schedule_one_test.go
@@ -3766,7 +3766,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu
 // end with a period.
 func makePredicateError(failReason string) error {
 	s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
-	return fmt.Errorf(s)
+	return errors.New(s)
 }

 func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go
index b17646320fc..08dcbf6d505 100644
--- a/pkg/volume/csi/csi_attacher.go
+++ b/pkg/volume/csi/csi_attacher.go
@@ -269,7 +269,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
 }

 func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
-	klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
+	klog.V(4).Info(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))

 	if deviceMountPath == "" {
 		return errors.New(log("attacher.MountDevice failed, deviceMountPath is empty"))
@@ -363,7 +363,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		// finished, we should remove the directory.
 		if err != nil && volumetypes.IsOperationFinishedError(err) {
 			// clean up metadata
-			klog.Errorf(log("attacher.MountDevice failed: %v", err))
+			klog.Error(log("attacher.MountDevice failed: %v", err))
 			if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
 				klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
 			}
@@ -377,7 +377,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 	}

 	if !stageUnstageSet {
-		klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+		klog.Info(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
 		// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
 		return nil
 	}
@@ -415,7 +415,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		return err
 	}

-	klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
+	klog.V(4).Info(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
 	return err
 }

@@ -604,7 +604,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 			return nil
 		}

-		klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
+		klog.Error(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
 		return err
 	}

@@ -627,7 +627,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 		return errors.New(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
 	}
 	if !stageUnstageSet {
-		klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
+		klog.Info(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
 		// Just delete the global directory + json file
 		if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
 			return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
@@ -650,7 +650,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
 		return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
 	}

-	klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
+	klog.V(4).Info(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
 	return nil
 }
diff --git a/pkg/volume/csi/csi_block.go b/pkg/volume/csi/csi_block.go
index fa2570b42c1..80c6b088ddc 100644
--- a/pkg/volume/csi/csi_block.go
+++ b/pkg/volume/csi/csi_block.go
@@ -105,7 +105,7 @@ var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{}
 // Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev
 func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
 	dir := getVolumeDevicePluginDir(m.specName, m.plugin.host)
-	klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
+	klog.V(4).Info(log("blockMapper.GetGlobalMapPath = %s", dir))
 	return dir, nil
 }

@@ -137,7 +137,7 @@ func (m *csiBlockMapper) getPublishPath() string {
 // returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName}
 func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
 	path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName))
-	klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
+	klog.V(4).Info(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
 	return path, m.specName
 }

@@ -149,10 +149,10 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 	csiSource *v1.CSIPersistentVolumeSource,
 	attachment *storage.VolumeAttachment,
 ) (string, error) {
-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock called"))

 	stagingPath := m.GetStagingPath()
-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))

 	// Check whether "STAGE_UNSTAGE_VOLUME" is set
 	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
@@ -160,7 +160,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 		return "", errors.New(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
 	}
 	if !stageUnstageSet {
-		klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+		klog.Info(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
 		return "", nil
 	}
 	publishVolumeInfo := map[string]string{}
@@ -200,7 +200,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 		return "", err
 	}

-	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
 	return stagingPath, nil
 }

@@ -212,7 +212,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
 	csiSource *v1.CSIPersistentVolumeSource,
 	attachment *storage.VolumeAttachment,
 ) (string, error) {
-	klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
+	klog.V(4).Info(log("blockMapper.publishVolumeForBlock called"))

 	publishVolumeInfo := map[string]string{}
 	if attachment != nil {
@@ -279,7 +279,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(

 // SetUpDevice ensures the device is attached returns path where the device is located.
 func (m *csiBlockMapper) SetUpDevice() (string, error) {
-	klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
+	klog.V(4).Info(log("blockMapper.SetUpDevice called"))

 	// Get csiSource from spec
 	if m.spec == nil {
@@ -341,7 +341,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
 }

 func (m *csiBlockMapper) MapPodDevice() (string, error) {
-	klog.V(4).Infof(log("blockMapper.MapPodDevice called"))
+	klog.V(4).Info(log("blockMapper.MapPodDevice called"))

 	// Get csiSource from spec
 	if m.spec == nil {
@@ -408,7 +408,7 @@ func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiCli
 	if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
 		return errors.New(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
 	}
-	klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
+	klog.V(4).Info(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))

 	return nil
 }

@@ -421,7 +421,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
 		return errors.New(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
 	}
 	if !stageUnstageSet {
Skipping unstageVolumeForBlock ...")) return nil } @@ -431,7 +431,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil { return errors.New(log("blockMapper.unstageVolumeForBlock failed: %v", err)) } - klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath)) + klog.V(4).Info(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath)) // Remove stagingPath directory and its contents if err := os.RemoveAll(stagingPath); err != nil { @@ -457,7 +457,7 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error stagingPath := m.GetStagingPath() if _, err := os.Stat(stagingPath); err != nil { if os.IsNotExist(err) { - klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath)) + klog.V(4).Info(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath)) } else { return err } diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 21cdf29807b..b31a777c85a 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -101,7 +101,7 @@ func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error { } func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error { - klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir)) + klog.V(4).Info(log("Mounter.SetUpAt(%s)", dir)) csi, err := c.csiClientGetter.Get() if err != nil { @@ -346,7 +346,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *mounterArgs.FsGroup, c.volumeID)) } - klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) + klog.V(4).Info(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) return nil } @@ -358,7 +358,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) { csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName)) if err != nil { if apierrors.IsNotFound(err) { - klog.V(5).Infof(log("CSIDriver %q not found, not adding service account token information", c.driverName)) + klog.V(5).Info(log("CSIDriver %q not found, not adding service account token information", c.driverName)) return nil, nil } return nil, err @@ -394,7 +394,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) { outputs[audience] = tr.Status } - klog.V(4).Infof(log("Fetched service account token attrs for CSIDriver %q", c.driverName)) + klog.V(4).Info(log("Fetched service account token attrs for CSIDriver %q", c.driverName)) tokens, _ := json.Marshal(outputs) return map[string]string{ "csi.storage.k8s.io/serviceAccount.tokens": string(tokens), @@ -416,7 +416,7 @@ func (c *csiMountMgr) TearDown() error { return c.TearDownAt(c.GetPath()) } func (c *csiMountMgr) TearDownAt(dir string) error { - klog.V(4).Infof(log("Unmounter.TearDownAt(%s)", dir)) + klog.V(4).Info(log("Unmounter.TearDownAt(%s)", dir)) volID := c.volumeID csi, err := c.csiClientGetter.Get() @@ -447,7 +447,7 @@ func (c *csiMountMgr) TearDownAt(dir string) error { if err := removeMountDir(c.plugin, dir); err != nil { return errors.New(log("Unmounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) } - klog.V(4).Infof(log("Unmounter.TearDownAt successfully unmounted dir 
[%s]", dir)) + klog.V(4).Info(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir)) return nil } diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index fec8a34b4d3..9afcd8cde20 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -97,7 +97,7 @@ var PluginHandler = &RegistrationHandler{} // ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { - klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s", + klog.Info(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s", pluginName, endpoint, strings.Join(versions, ","))) _, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions) @@ -110,7 +110,7 @@ func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, // RegisterPlugin is called when a plugin can be registered func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error { - klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) + klog.Info(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions) if err != nil { @@ -432,7 +432,7 @@ func (p *csiPlugin) NewMounter( } func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { - klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) + klog.V(4).Info(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) kvh, ok := p.host.(volume.KubeletVolumeHost) if !ok { @@ -697,7 +697,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod) (vo } func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { - klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) + klog.V(4).Info(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) unmapper := &csiBlockMapper{ plugin: p, podUID: podUID, @@ -839,7 +839,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) { csiDriver, err := p.getCSIDriver(driverName) if err != nil { if apierrors.IsNotFound(err) { - klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", driverName)) + klog.V(4).Info(log("CSIDriver %q not found, not adding pod information", driverName)) return false, nil } return false, err @@ -847,7 +847,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) { // if PodInfoOnMount is not set or false we do not set pod attributes if csiDriver.Spec.PodInfoOnMount == nil || *csiDriver.Spec.PodInfoOnMount == false { - klog.V(4).Infof(log("CSIDriver %q does not require pod information", driverName)) + klog.V(4).Info(log("CSIDriver %q does not require pod information", driverName)) return false, nil } return true, nil diff --git a/pkg/volume/csi/expander.go b/pkg/volume/csi/expander.go index e56627203af..3869539dfc0 100644 --- a/pkg/volume/csi/expander.go +++ b/pkg/volume/csi/expander.go @@ -36,7 +36,7 @@ func (c *csiPlugin) RequiresFSResize() bool { } func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) { - 
klog.V(4).Infof(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath)) + klog.V(4).Info(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath)) csiSource, err := getCSISourceFromSpec(resizeOptions.VolumeSpec) if err != nil { return false, errors.New(log("Expander.NodeExpand failed to get CSI persistent source: %v", err)) diff --git a/pkg/volume/downwardapi/downwardapi_test.go b/pkg/volume/downwardapi/downwardapi_test.go index d278e0f01a7..89ce887691b 100644 --- a/pkg/volume/downwardapi/downwardapi_test.go +++ b/pkg/volume/downwardapi/downwardapi_test.go @@ -321,7 +321,7 @@ func (step stepName) getName() string { return step.name } func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) { data, err := os.ReadFile(filepath.Join(volumePath, filename)) if err != nil { - t.Errorf(err.Error()) + t.Error(err.Error()) return } actualStr := string(data) @@ -357,7 +357,7 @@ type verifyMode struct { func (step verifyMode) run(test *downwardAPITest) { fileInfo, err := os.Stat(filepath.Join(test.volumePath, step.name)) if err != nil { - test.t.Errorf(err.Error()) + test.t.Error(err.Error()) return } diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index ec0e74c3b82..732d0c9cf70 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -266,7 +266,7 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) { return nil, errors.New(status.Status) } else if status.Status != StatusSuccess { errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message) - klog.Errorf(errMsg) + klog.Error(errMsg) return nil, fmt.Errorf("%s", errMsg) } diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index 32b40ecf257..1d201bcf4a1 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -366,7 +366,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) { kubeClient := host.GetKubeClient() if kubeClient == nil { err := fmt.Errorf("failed to get kubeclient when creating portworx client") - klog.Errorf(err.Error()) + klog.Error(err.Error()) return nil, err } @@ -379,7 +379,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) { if svc == nil { err = fmt.Errorf("service: %v not found. Consult Portworx docs to deploy it", pxServiceName) - klog.Errorf(err.Error()) + klog.Error(err.Error()) return nil, err } diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index c9d9c1951f8..6daa2c47732 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -17,6 +17,7 @@ limitations under the License. 
package secret import ( + "errors" "fmt" "k8s.io/klog/v2" @@ -24,7 +25,7 @@ import ( utilstrings "k8s.io/utils/strings" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" @@ -184,7 +185,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs optional := b.source.Optional != nil && *b.source.Optional secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName) if err != nil { - if !(errors.IsNotFound(err) && optional) { + if !(apierrors.IsNotFound(err) && optional) { klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err) return err } @@ -276,8 +277,8 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, continue } errMsg := fmt.Sprintf("references non-existent secret key: %s", ktp.Key) - klog.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) + klog.Error(errMsg) + return nil, errors.New(errMsg) } fileProjection.Data = []byte(content) diff --git a/pkg/volume/util/operationexecutor/node_expander.go b/pkg/volume/util/operationexecutor/node_expander.go index 19547c94e09..f6e52473fb7 100644 --- a/pkg/volume/util/operationexecutor/node_expander.go +++ b/pkg/volume/util/operationexecutor/node_expander.go @@ -131,7 +131,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) { if err != nil { msg := ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion in progress: %v", err) - klog.Errorf(msg.Error()) + klog.Error(msg.Error()) return false, err, testResponseData{} } } @@ -143,12 +143,12 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) { if volumetypes.IsInfeasibleError(resizeErr) || ne.markExpansionInfeasibleOnFailure { ne.pvc, markFailedError = util.MarkNodeExpansionInfeasible(ne.pvc, ne.kubeClient, resizeErr) if markFailedError != nil { - klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error()) + klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error()) } } else { ne.pvc, markFailedError = util.MarkNodeExpansionFailedCondition(ne.pvc, ne.kubeClient, resizeErr) if markFailedError != nil { - klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error()) + klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error()) } } } @@ -158,7 +158,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) { // expansion operation should not block mounting if volumetypes.IsFailedPreconditionError(resizeErr) { ne.actualStateOfWorld.MarkForInUseExpansionError(ne.vmt.VolumeName) - klog.Errorf(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error()) + klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error()) return false, nil, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true} } return false, resizeErr, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true} diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index eaca8ca2282..d8fd95186d6 100644 --- 
a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -370,13 +370,13 @@ func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs // GenerateErrorDetailed returns detailed errors for volumes to attach func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) { - return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) + return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) } // GenerateError returns simple and detailed errors for volumes to attach func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) { simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err)) - return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg) + return errors.New(simpleMsg), errors.New(detailedMsg) } // String combines key fields of the volume for logging in text format. @@ -535,13 +535,13 @@ func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg // GenerateErrorDetailed returns detailed errors for volumes to mount func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) { - return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) + return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) } // GenerateError returns simple and detailed errors for volumes to mount func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) { simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err)) - return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg) + return errors.New(simpleMsg), errors.New(detailedMsg) } // AttachedVolume represents a volume that is attached to a node. @@ -597,13 +597,13 @@ func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs // GenerateErrorDetailed returns detailed errors for attached volumes func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) { - return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) + return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) } // GenerateError returns simple and detailed errors for attached volumes func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) { simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err)) - return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg) + return errors.New(simpleMsg), errors.New(detailedMsg) } // String combines key fields of the volume for logging in text format. 
@@ -769,13 +769,13 @@ func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg // GenerateErrorDetailed returns simple and detailed errors for mounted volumes func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) { - return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) + return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err))) } // GenerateError returns simple and detailed errors for mounted volumes func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) { simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err)) - return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg) + return errors.New(simpleMsg), errors.New(detailedMsg) } type operationExecutor struct { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index e246719c23e..b450a8f0ddb 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -197,7 +197,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil || volumePlugin == nil { - klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) + klog.Error(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) continue } volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()] @@ -314,7 +314,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( for _, pod := range volumeToAttach.ScheduledPods { og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg) } - klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) + klog.Info(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( @@ -434,7 +434,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } - klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) + klog.Info(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) // Update actual state of world actualStateOfWorld.MarkVolumeAsDetached( @@ -647,7 +647,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( // Considering the above situations, we mark volume as uncertain here so that reconciler will trigger // volume tear down when pod is deleted, and also makes sure pod will not start using it. 
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts); err != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error()) } return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } @@ -705,7 +705,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount, // Only devices which were uncertain can be marked as unmounted markDeviceUnmountError := actualStateOfWorld.MarkDeviceAsUnmounted(volumeToMount.VolumeName) if markDeviceUnmountError != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error()) } return } @@ -716,7 +716,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount, // which was previously marked as mounted here as uncertain. markDeviceUncertainError := actualStateOfWorld.MarkDeviceAsUncertain(volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel) if markDeviceUncertainError != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error()) } } @@ -734,7 +734,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount, t := actualStateOfWorld.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName) if t != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error()) } return @@ -744,7 +744,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount, actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeNotMounted { t := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts) if t != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error()) } } } @@ -792,7 +792,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts) if markMountUncertainErr != nil { // There is nothing else we can do. Hope that UnmountVolume will be re-tried shortly. - klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error()) + klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error()) } // On failure, return error. Caller will log and retry. 
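The klog swaps in this file all follow one rule: Errorf, Warningf, and Infof treat their first argument as a printf format, while Error, Warning, and Info handle their arguments like fmt.Print. A fully assembled message therefore belongs in the print-style variant, or behind an explicit "%s". A short sketch, assuming k8s.io/klog/v2 as used throughout this patch (the error text below is invented for illustration):

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	detailedErr := errors.New("mount failed: filesystem 97% full") // hypothetical error text
	// klog.Errorf(detailedErr.Error()) // vet: non-constant format string; the '%' would garble the entry
	klog.Error(detailedErr.Error())        // print-style: arguments handled like fmt.Print
	klog.Errorf("%s", detailedErr.Error()) // equivalent fix when the printf-style form must stay
	klog.Flush()
}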
@@ -815,7 +815,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolMountedErr != nil { // On failure, just log and exit - klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) + klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) } return volumetypes.NewOperationContext(nil, nil, migrated) @@ -866,7 +866,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( // If the mount path could not be found, don't fail the unmount, but instead log a warning and proceed, // using the value from deviceToDetach.DeviceMountPath, so that the device can be marked as unmounted deviceMountPath = deviceToDetach.DeviceMountPath - klog.Warningf(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf( + klog.Warning(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf( "GetDeviceMountPath failed, but unmount operation will proceed using deviceMountPath=%s: %v", deviceMountPath, err), "")) } refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath) @@ -885,7 +885,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext) if markDeviceUncertainErr != nil { // There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly. - klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error()) + klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error()) } // On failure, return error. Caller will log and retry. @@ -906,7 +906,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext) if markDeviceUncertainErr != nil { // There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly. - klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error()) + klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error()) } eventErr, detailedErr := deviceToDetach.GenerateError( "UnmountDevice failed", @@ -1151,7 +1151,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // Considering the above situations, we mark volume as uncertain here so that reconciler will trigger // volume tear down when pod is deleted, and also makes sure pod will not start using it. 
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts); err != nil { - klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error()) + klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error()) } return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } @@ -1270,7 +1270,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolUnmountedErr != nil { // On failure, just log and exit - klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) + klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) } return volumetypes.NewOperationContext(nil, nil, migrated) @@ -1384,7 +1384,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } - klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) + klog.Info(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( @@ -1519,7 +1519,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { - klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) + klog.Warning(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) return nil } @@ -1536,7 +1536,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( } // Volume is not marked as in use by node - klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) + klog.Info(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) return nil } @@ -1960,7 +1960,7 @@ func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount, resizeDone, err := og.nodeExpandVolume(volumeToMount, actualStateOfWorld, resizeOptions) if err != nil { e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err) - klog.Errorf(e2.Error()) + klog.Error(e2.Error()) return false, e1, e2 } if resizeDone { @@ -1991,7 +1991,7 @@ func (og *operationGenerator) expandVolumeDuringMount(volumeToMount VolumeToMoun if pvcStatusCap.Cmp(pvSpecCap) < 0 { if volumeToMount.VolumeSpec.ReadOnly { simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system") - klog.Warningf(detailedMsg) + klog.Warning(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) return true, nil @@ -2057,7 +2057,7 @@ func (og *operationGenerator) nodeExpandVolume( if volumeToMount.VolumeSpec.ReadOnly { simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system") - klog.Warningf(detailedMsg) + klog.Warning(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) og.recorder.Eventf(pvc, v1.EventTypeWarning, 
kevents.FileSystemResizeFailed, simpleMsg)
 		return true, nil
@@ -2097,7 +2097,7 @@ func (og *operationGenerator) checkForRecoveryFromExpansion(pvc *v1.PersistentVo
 	// and hence we are going to keep expanding using older logic.
 	if resizeStatus == "" && allocatedResource == nil {
 		_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume running with", "older external resize controller")
-		klog.Warningf(detailedMsg)
+		klog.Warning(detailedMsg)
 		return false
 	}
 	return true
@@ -2139,7 +2139,7 @@ func (og *operationGenerator) legacyCallNodeExpandOnPlugin(resizeOp nodeResizeOp
 	// expansion operation should not block mounting
 	if volumetypes.IsFailedPreconditionError(resizeErr) {
 		actualStateOfWorld.MarkForInUseExpansionError(volumeToMount.VolumeName)
-		klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
+		klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed", resizeErr).Error())
 		return true, nil
 	}
 	return false, resizeErr
diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go
index b7197dbdfe4..e438ba21e46 100644
--- a/pkg/volume/util/recyclerclient/recycler_client.go
+++ b/pkg/volume/util/recyclerclient/recycler_client.go
@@ -18,11 +18,12 @@ package recyclerclient
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/watch"
@@ -72,7 +73,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
 	// Start the pod
 	_, err = recyclerClient.CreatePod(pod)
 	if err != nil {
-		if errors.IsAlreadyExists(err) {
+		if apierrors.IsAlreadyExists(err) {
 			deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
 			if deleteErr != nil {
 				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
@@ -128,7 +129,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
 		}
 		if pod.Status.Phase == v1.PodFailed {
 			if pod.Status.Message != "" {
-				return fmt.Errorf(pod.Status.Message)
+				return errors.New(pod.Status.Message)
 			}
 			return fmt.Errorf("pod failed, pod.Status.Message unknown")
 		}
diff --git a/pkg/volume/util/subpath/subpath_linux_test.go b/pkg/volume/util/subpath/subpath_linux_test.go
index 86f24da11ff..987d6967d66 100644
--- a/pkg/volume/util/subpath/subpath_linux_test.go
+++ b/pkg/volume/util/subpath/subpath_linux_test.go
@@ -259,7 +259,7 @@ func TestSafeMakeDir(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			base, err := ioutil.TempDir("", "safe-make-dir-"+test.name+"-")
 			if err != nil {
-				t.Fatalf(err.Error())
+				t.Fatal(err.Error())
 			}
 			defer os.RemoveAll(base)
 			test.prepare(base)
@@ -385,7 +385,7 @@ func TestRemoveEmptyDirs(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "remove-empty-dirs-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		if err = test.prepare(base); err != nil {
 			os.RemoveAll(base)
@@ -615,7 +615,7 @@ func TestCleanSubPaths(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "clean-subpaths-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		mounts, err := test.prepare(base)
 		if err != nil {
@@ -872,7 +872,7 @@ func TestBindSubPath(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("",
"bind-subpath-"+test.name+"-") if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } mounts, volPath, subPath, err := test.prepare(base) @@ -986,7 +986,7 @@ func TestSubpath_PrepareSafeSubpath(t *testing.T) { klog.V(4).Infof("test %q", test.name) base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-") if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } defer os.RemoveAll(base) @@ -1220,7 +1220,7 @@ func TestSafeOpen(t *testing.T) { klog.V(4).Infof("test %q", test.name) base, err := ioutil.TempDir("", "safe-open-"+test.name+"-") if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } test.prepare(base) @@ -1367,7 +1367,7 @@ func TestFindExistingPrefix(t *testing.T) { klog.V(4).Infof("test %q", test.name) base, err := ioutil.TempDir("", "find-prefix-"+test.name+"-") if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } test.prepare(base) path := filepath.Join(base, test.path) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation_test.go index c6776ebf6a9..d1b9da2e5df 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation_test.go @@ -620,7 +620,7 @@ func TestValidateCustomResource(t *testing.T) { } errs, _ := celValidator.Validate(context.TODO(), nil, structural, obj, oldObject, celconfig.RuntimeCELCostBudget) if len(errs) > 0 { - t.Errorf(errs.ToAggregate().Error()) + t.Error(errs.ToAggregate().Error()) } } for i, failingObject := range tt.failingObjects { diff --git a/staging/src/k8s.io/apiserver/pkg/cel/lazy/lazy_test.go b/staging/src/k8s.io/apiserver/pkg/cel/lazy/lazy_test.go index 7650e03ba3f..355d35bfa4c 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/lazy/lazy_test.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/lazy/lazy_test.go @@ -60,7 +60,7 @@ func TestLazyMapType(t *testing.T) { evalCounter++ v, err := compileAndRun(env, activation, `{"a": "a"}`) if err != nil { - return types.NewErr(err.Error()) + return types.NewErr("%s", err.Error()) } return v }) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index 04a28e7fa6b..9b79c1d93b0 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -2818,7 +2818,7 @@ func TestDeleteWithOptions(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - t.Logf(string(s)) + t.Log(string(s)) } if simpleStorage.deleted != ID { t.Errorf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID) @@ -2858,7 +2858,7 @@ func TestDeleteWithOptionsQuery(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - t.Logf(string(s)) + t.Log(string(s)) } if simpleStorage.deleted != ID { t.Fatalf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID) @@ -2901,7 +2901,7 @@ func TestDeleteWithOptionsQueryAndBody(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - t.Logf(string(s)) + t.Log(string(s)) } if simpleStorage.deleted != ID { t.Errorf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers_test.go index 6e9e30f6146..874dc1980eb 100644 
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers_test.go @@ -326,7 +326,7 @@ func TestSerializeObject(t *testing.T) { compressionEnabled: true, statusCode: http.StatusInternalServerError, out: smallPayload, - outErrs: []error{fmt.Errorf(string(largePayload)), fmt.Errorf("bad2")}, + outErrs: []error{errors.New(string(largePayload)), errors.New("bad2")}, mediaType: "application/json", req: &http.Request{ Header: http.Header{ diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 9effcb768f2..8672c09420d 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -50,7 +50,7 @@ var ( func handleError(w http.ResponseWriter, r *http.Request, err error) { errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) http.Error(w, errorMsg, http.StatusInternalServerError) - klog.Errorf(err.Error()) + klog.Error(err.Error()) } // requestWatermark is used to track maximal numbers of requests in a particular phase of handling diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go index 12b1e9e8a4c..1245f7fd6c4 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go @@ -1603,7 +1603,7 @@ func verifyEvents(t *testing.T, w watch.Interface, events []watch.Event, strictO if !valid { t.Logf("(called from line %d)", line) for _, err := range errors { - t.Errorf(err) + t.Error(err) } } } diff --git a/staging/src/k8s.io/apiserver/pkg/util/peerproxy/peerproxy_handler.go b/staging/src/k8s.io/apiserver/pkg/util/peerproxy/peerproxy_handler.go index bc342165b21..e16f88a46e7 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/peerproxy/peerproxy_handler.go +++ b/staging/src/k8s.io/apiserver/pkg/util/peerproxy/peerproxy_handler.go @@ -176,7 +176,7 @@ func (h *peerProxyHandler) WrapHandler(handler http.Handler) http.Handler { // TODO: maintain locally serviceable GVRs somewhere so that we dont have to // consult the storageversion-informed map for those if len(serviceableByResp.peerEndpoints) == 0 { - klog.Errorf(fmt.Sprintf("GVR %v is not served by anything in this cluster", gvr)) + klog.Error(fmt.Sprintf("GVR %v is not served by anything in this cluster", gvr)) handler.ServeHTTP(w, r) return } diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/json.go b/staging/src/k8s.io/cli-runtime/pkg/printers/json.go index 8ab2235f8b2..7d14a4e5a61 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/json.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/json.go @@ -19,6 +19,7 @@ package printers import ( "bytes" "encoding/json" + "errors" "fmt" "io" "reflect" @@ -36,13 +37,13 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. 
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } switch obj := obj.(type) { case *metav1.WatchEvent: if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } data, err := json.Marshal(obj) if err != nil { diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/jsonpath.go b/staging/src/k8s.io/cli-runtime/pkg/printers/jsonpath.go index 769960d6677..216449ec445 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/jsonpath.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/jsonpath.go @@ -19,6 +19,7 @@ package printers import ( "bytes" "encoding/json" + "errors" "fmt" "io" "reflect" @@ -119,7 +120,7 @@ func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } var queryObj interface{} = obj diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/name.go b/staging/src/k8s.io/cli-runtime/pkg/printers/name.go index 086166af272..1d2fe7f9641 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/name.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/name.go @@ -17,6 +17,7 @@ limitations under the License. package printers import ( + "errors" "fmt" "io" "reflect" @@ -52,7 +53,7 @@ func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. // we need an actual value in order to retrieve the package path for an object. if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } if meta.IsListType(obj) { diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/template.go b/staging/src/k8s.io/cli-runtime/pkg/printers/template.go index ccff542262c..4b08573ce35 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/template.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/template.go @@ -18,6 +18,7 @@ package printers import ( "encoding/base64" + "errors" "fmt" "io" "reflect" @@ -61,7 +62,7 @@ func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) { // PrintObj formats the obj with the Go Template. func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } var data []byte diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/yaml.go b/staging/src/k8s.io/cli-runtime/pkg/printers/yaml.go index 9c444bdc265..8c6be82fe86 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/yaml.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/yaml.go @@ -17,6 +17,7 @@ limitations under the License. 
package printers import ( + "errors" "fmt" "io" "reflect" @@ -42,7 +43,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } count := atomic.AddInt64(&p.printCount, 1) @@ -55,7 +56,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { switch obj := obj.(type) { case *metav1.WatchEvent: if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) + return errors.New(InternalObjectPrinterErr) } data, err := yaml.Marshal(obj) if err != nil { diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/builder.go b/staging/src/k8s.io/cli-runtime/pkg/resource/builder.go index 47ec83bbbad..5f8fdcd9bdd 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/builder.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/builder.go @@ -1030,7 +1030,7 @@ func (b *Builder) visitByResource() *Result { if b.allNamespace { errMsg = "a resource cannot be retrieved by name across all namespaces" } - return result.withError(fmt.Errorf(errMsg)) + return result.withError(errors.New(errMsg)) } } @@ -1093,7 +1093,7 @@ func (b *Builder) visitByName() *Result { if b.allNamespace { errMsg = "a resource cannot be retrieved by name across all namespaces" } - return result.withError(fmt.Errorf(errMsg)) + return result.withError(errors.New(errMsg)) } } diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/builder_test.go b/staging/src/k8s.io/cli-runtime/pkg/resource/builder_test.go index 3ec23461ece..41149b43feb 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/builder_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/builder_test.go @@ -1873,7 +1873,7 @@ func TestHasNames(t *testing.T) { name: "test8", args: []string{"rc/foo", "bar"}, expectedHasName: false, - expectedError: fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '" + basename + " get resource/' instead of '" + basename + " get resource resource/'"), + expectedError: errors.New("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. 
'" + basename + " get resource/<resource_name>' instead of '" + basename + " get resource resource/<resource_name>'"),
 		},
 	}
 	for _, tt := range tests {
diff --git a/staging/src/k8s.io/client-go/rest/client_test.go b/staging/src/k8s.io/client-go/rest/client_test.go
index 6ba9e7388c7..ebb35c50900 100644
--- a/staging/src/k8s.io/client-go/rest/client_test.go
+++ b/staging/src/k8s.io/client-go/rest/client_test.go
@@ -299,7 +299,7 @@ func TestHTTPProxy(t *testing.T) {
 	}))
 	defer testProxyServer.Close()
-	t.Logf(testProxyServer.URL)
+	t.Log(testProxyServer.URL)
 	u, err := url.Parse(testProxyServer.URL)
 	if err != nil {
diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/v4.go b/staging/src/k8s.io/client-go/tools/remotecommand/v4.go
index 69ca934a0d7..6146bdf12a1 100644
--- a/staging/src/k8s.io/client-go/tools/remotecommand/v4.go
+++ b/staging/src/k8s.io/client-go/tools/remotecommand/v4.go
@@ -115,5 +115,5 @@ func (d *errorDecoderV4) decode(message []byte) error {
 		return errors.New("error stream protocol error: unknown error")
 	}
-	return fmt.Errorf(status.Message)
+	return errors.New(status.Message)
 }
diff --git a/staging/src/k8s.io/client-go/transport/cache_test.go b/staging/src/k8s.io/client-go/transport/cache_test.go
index f2e455ccb04..54705276d03 100644
--- a/staging/src/k8s.io/client-go/transport/cache_test.go
+++ b/staging/src/k8s.io/client-go/transport/cache_test.go
@@ -159,7 +159,7 @@ func TestTLSConfigKey(t *testing.T) {
 			shouldCacheA := valueA.Proxy == nil
 			if shouldCacheA != canCacheA {
-				t.Errorf("Unexpected canCache=false for " + nameA)
+				t.Error("Unexpected canCache=false for " + nameA)
 			}
 			configIsNotEmpty := !reflect.DeepEqual(*valueA, Config{})
diff --git a/staging/src/k8s.io/cloud-provider/app/webhooks.go b/staging/src/k8s.io/cloud-provider/app/webhooks.go
index 523fe6ef375..20025ad6725 100644
--- a/staging/src/k8s.io/cloud-provider/app/webhooks.go
+++ b/staging/src/k8s.io/cloud-provider/app/webhooks.go
@@ -138,7 +138,7 @@ func (h WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	admissionResponse, err = h.AdmissionHandler(in.Request)
 	if err != nil {
 		e := fmt.Sprintf("error generating admission response: %v", err)
-		klog.Errorf(e)
+		klog.Error(e)
 		statusCode = http.StatusInternalServerError
 		http.Error(w, e, statusCode)
 		return
diff --git a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go
index d6e5ab3b800..26e2c36d0a0 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go
@@ -312,7 +312,7 @@ func (rc *RouteController) reconcile(ctx context.Context, nodes []*v1.Node, rout
 					UID:       types.UID(nodeName),
 					Namespace: "",
 				}, v1.EventTypeWarning, "FailedToCreateRoute", msg)
-				klog.V(4).Infof(msg)
+				klog.V(4).Info(msg)
 				return err
 			}
 		}
diff --git a/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go b/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
index 00262cc3b41..692a645f649 100644
--- a/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
+++ b/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
@@ -198,7 +198,7 @@ func TestReadLogs(t *testing.T) {
 			err = ReadLogs(context.TODO(), nil, file.Name(), containerID, opts, fakeRuntimeService, stdoutBuf, stderrBuf)
 			if err != nil {
-				t.Fatalf(err.Error())
+				t.Fatal(err.Error())
 			}
 			if stderrBuf.Len() > 0 {
 				t.Fatalf("Stderr: %v", stderrBuf.String())
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go
b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go index b38760a1c86..73b3cea7d6f 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go @@ -206,7 +206,7 @@ func (o *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd) if err != nil { - return cmdutil.UsageErrorf(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, "%s", err.Error()) } o.Builder = f.NewBuilder diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/cp/cp_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/cp/cp_test.go index 734f95625f7..b0038d0f3fc 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/cp/cp_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/cp/cp_test.go @@ -988,6 +988,6 @@ func cmpFileData(t *testing.T, filePath, data string) { type testWriter testing.T func (t *testWriter) Write(p []byte) (n int, err error) { - t.Logf(string(p)) + t.Log(string(p)) return len(p), nil } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go index 54b164b351b..6a28942e93b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go @@ -18,6 +18,7 @@ package create import ( "context" + "errors" "fmt" "strconv" "strings" @@ -399,12 +400,12 @@ func parsePorts(portString string) (int32, intstr.IntOrString, error) { var targetPort intstr.IntOrString if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil { if errs := validation.IsValidPortName(portStringSlice[1]); len(errs) != 0 { - return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ",")) + return 0, intstr.FromInt32(0), errors.New(strings.Join(errs, ",")) } targetPort = intstr.FromString(portStringSlice[1]) } else { if errs := validation.IsValidPortNum(portNum); len(errs) != 0 { - return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ",")) + return 0, intstr.FromInt32(0), errors.New(strings.Join(errs, ",")) } targetPort = intstr.FromInt32(int32(portNum)) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go index 12df2ff4f7f..2792ccd481c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go @@ -531,7 +531,7 @@ func (o *DeleteOptions) confirmation(infos []*resource.Info) bool { fmt.Fprintf(o.Out, "%s/%s\n", kindString, info.Name) } - fmt.Fprintf(o.Out, i18n.T("Do you want to continue?")+" (y/n): ") + fmt.Fprint(o.Out, i18n.T("Do you want to continue?")+" (y/n): ") var input string _, err := fmt.Fscan(o.In, &input) if err != nil { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go b/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go index 2afa6b17e9b..b82e493cee8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go @@ -161,7 +161,7 @@ func NewCmdDiff(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Co // command it means changes were found. // Thus, it should return status code greater than 1. 
cmd.SetFlagErrorFunc(func(command *cobra.Command, err error) error {
-		cmdutil.CheckDiffErr(cmdutil.UsageErrorf(cmd, err.Error()))
+		cmdutil.CheckDiffErr(cmdutil.UsageErrorf(cmd, "%s", err.Error()))
 		return nil
 	})
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
index 6999739c9a3..a4b71a4219c 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
@@ -243,7 +243,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
 	var err error
 	if len(args) == 0 && !cmd.Flags().Changed("selector") {
-		return cmdutil.UsageErrorf(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use))
+		return cmdutil.UsageErrorf(cmd, "USAGE: %s [flags]", cmd.Use)
 	}
 	if len(args) > 0 && len(o.drainer.Selector) > 0 {
 		return cmdutil.UsageErrorf(cmd, "error: cannot specify both a node name and a --selector option")
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go
index c008c5b43a9..f78c2bfcc53 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go
@@ -216,7 +216,7 @@ func (p *ExecOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn []s
 	p.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd)
 	if err != nil {
-		return cmdutil.UsageErrorf(cmd, err.Error())
+		return cmdutil.UsageErrorf(cmd, "%s", err.Error())
 	}
 	p.Builder = f.NewBuilder
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go
index 38024cfa5db..b9943495da0 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go
@@ -19,6 +19,7 @@ package get
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"reflect"
@@ -161,7 +162,7 @@ func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error
 	// we need an actual value in order to retrieve the package path for an object.
 	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
 	if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
-		return fmt.Errorf(printers.InternalObjectPrinterErr)
+		return errors.New(printers.InternalObjectPrinterErr)
 	}
 	if _, found := out.(*tabwriter.Writer); !found {
@@ -210,7 +211,7 @@ func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jso
 	switch u := obj.(type) {
 	case *metav1.WatchEvent:
 		if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(u.Object.Object)).Type().PkgPath()) {
-			return fmt.Errorf(printers.InternalObjectPrinterErr)
+			return errors.New(printers.InternalObjectPrinterErr)
 		}
 		unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(u.Object.Object)
 		if err != nil {
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
index a1bc13cb1a5..4d0ff217fc4 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
@@ -290,7 +290,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri
 			usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g.
%[2]s explain pods).", usageString, fullCmdName) } - return cmdutil.UsageErrorf(cmd, usageString) + return cmdutil.UsageErrorf(cmd, "%s", usageString) } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go b/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go index aa6ee3b921f..4328560b896 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go @@ -331,7 +331,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg getPodTimeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd) if err != nil { - return cmdutil.UsageErrorf(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, "%s", err.Error()) } resourceName := args[0] diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go b/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go index bca70fe3103..f185926ee74 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go @@ -183,7 +183,7 @@ func (o *TaintOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } if o.taintsToAdd, o.taintsToRemove, err = parseTaints(taintArgs); err != nil { - return cmdutil.UsageErrorf(cmd, err.Error()) + return cmdutil.UsageErrorf(cmd, "%s", err.Error()) } o.builder = f.NewBuilder(). WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go index bd291a8c916..11eaf61a1fd 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -243,7 +243,7 @@ func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate { // commands. 
func StandardErrorMessage(err error) (string, bool) { if debugErr, ok := err.(debugError); ok { - klog.V(4).Infof(debugErr.DebugError()) + klog.V(4).Info(debugErr.DebugError()) } status, isStatus := err.(apierrors.APIStatus) switch { diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go index 8898cb688c9..442311b848b 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go @@ -3521,8 +3521,8 @@ Events: t.Errorf("unexpected error: %v", err) } if out != test.output { - t.Logf(out) - t.Logf(test.output) + t.Log(out) + t.Log(test.output) t.Errorf("expected: \n%q\n but got output: \n%q\n", test.output, out) } }) @@ -5175,7 +5175,7 @@ Parameters: t.Errorf("unexpected error: %v", err) } if out != expectedOut { - t.Logf(out) + t.Log(out) t.Errorf("expected : %q\n but got output:\n %q", test.output, out) } }) @@ -6354,7 +6354,7 @@ Events: ` + "\n", t.Errorf("unexpected error: %v", err) } if out != tc.output { - t.Logf(out) + t.Log(out) t.Errorf("expected :\n%s\nbut got output:\n%s", tc.output, out) } }) diff --git a/staging/src/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go b/staging/src/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go index fdfdf08eeb1..b7e1bf00f8c 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go +++ b/staging/src/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go @@ -62,7 +62,7 @@ func (p *HelpFlagPrinter) PrintHelpFlag(flag *flag.Flag) { } appendTabStr := strings.ReplaceAll(wrappedStr, "\n", "\n\t") - fmt.Fprintf(p.out, appendTabStr+"\n\n") + fmt.Fprint(p.out, appendTabStr+"\n\n") } // writeFlag will output the help flag based diff --git a/staging/src/k8s.io/mount-utils/mount_linux.go b/staging/src/k8s.io/mount-utils/mount_linux.go index 1b52dcf82ab..3f35a656067 100644 --- a/staging/src/k8s.io/mount-utils/mount_linux.go +++ b/staging/src/k8s.io/mount-utils/mount_linux.go @@ -623,7 +623,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target sensitiveOptionsLog := sanitizedOptionsForLogging(options, sensitiveOptions) detailedErr := fmt.Sprintf("format of disk %q failed: type:(%q) target:(%q) options:(%q) errcode:(%v) output:(%v) ", source, fstype, target, sensitiveOptionsLog, err, string(output)) klog.Error(detailedErr) - return NewMountError(FormatFailed, detailedErr) + return NewMountError(FormatFailed, "%s", detailedErr) } klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) @@ -646,7 +646,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target // Mount the disk klog.V(4).Infof("Attempting to mount disk %s in %s format at %s", source, fstype, target) if err := mounter.MountSensitive(source, target, fstype, options, sensitiveOptions); err != nil { - return NewMountError(mountErrorValue, err.Error()) + return NewMountError(mountErrorValue, "%s", err.Error()) } return nil diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index e1fe1196f32..596950ac178 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -553,13 +553,13 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient locatedWardle := false for _, item := range list.Items { if item.Name == apiServiceName { - framework.Logf("Found " + apiServiceName + " in APIServiceList") + framework.Logf("Found %s in APIServiceList", 
apiServiceName)
 			locatedWardle = true
 			break
 		}
 	}
 	if !locatedWardle {
-		framework.Failf("Unable to find " + apiServiceName + " in APIServiceList")
+		framework.Failf("Unable to find %s in APIServiceList", apiServiceName)
 	}
 	// As the APIService doesn't have any labels currently set we need to
@@ -773,7 +773,7 @@ func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err
 			msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
 		}
-		framework.Failf(msg)
+		framework.Fail(msg)
 	}
 }
diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go
index 1b2f69867de..d791d8d1fa1 100644
--- a/test/e2e/apimachinery/garbage_collector.go
+++ b/test/e2e/apimachinery/garbage_collector.go
@@ -262,7 +262,7 @@ func gatherMetrics(ctx context.Context, f *framework.Framework) {
 		framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
 	} else {
 		summary = (*e2emetrics.ComponentCollection)(&received)
-		framework.Logf(summary.PrintHumanReadable())
+		framework.Logf("%s", summary.PrintHumanReadable())
 	}
 }
diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go
index 4714d77dd11..874cb54b954 100644
--- a/test/e2e/apps/statefulset.go
+++ b/test/e2e/apps/statefulset.go
@@ -2012,7 +2012,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
 	name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
 	for k, v := range kv {
 		cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
-		framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
+		framework.Logf("%s", e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
 	}
 }
@@ -2047,7 +2047,7 @@ func (m *mysqlGaleraTester) deploy(ctx context.Context, ns string) *appsv1.State
 		"create database statefulset;",
 		"use statefulset; create table foo (k varchar(20), v varchar(20));",
 	} {
-		framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
+		framework.Logf("%s", m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
 	}
 	return m.ss
 }
@@ -2056,7 +2056,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
 	name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
 	for k, v := range kv {
 		cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
-		framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
+		framework.Logf("%s", m.mysqlExec(cmd, m.ss.Namespace, name))
 	}
 }
@@ -2087,7 +2087,7 @@ func (m *redisTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet
 func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
 	name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
 	for k, v := range kv {
-		framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
+		framework.Logf("%s", m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
 	}
 }
@@ -2117,7 +2117,7 @@ func (c *cockroachDBTester) deploy(ctx context.Context, ns string) *appsv1.State
 		"CREATE DATABASE IF NOT EXISTS foo;",
 		"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
 	} {
-		framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
+		framework.Logf("%s", c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
 	}
 	return c.ss
 }
@@ -2126,7 +2126,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
 	name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
 	for k, v := range kv {
 		cmd := fmt.Sprintf("UPSERT INTO foo.bar
VALUES ('%v', '%v');", k, v) - framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name)) + framework.Logf("%s", c.cockroachDBExec(cmd, c.ss.Namespace, name)) } } func (c *cockroachDBTester) read(statefulPodIndex int, key string) string { diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go index d75f900fb96..4df326c1445 100644 --- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go +++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go @@ -110,7 +110,7 @@ func traceRouteToControlPlane() { cmd := exec.Command(traceroute, "-I", framework.APIAddress()) out, err := cmd.Output() if len(out) != 0 { - framework.Logf(string(out)) + framework.Logf("%s", string(out)) } if exiterr, ok := err.(*exec.ExitError); err != nil && ok { framework.Logf("Error while running traceroute: %s", exiterr.Stderr) diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go index 09bc5e2467a..6adfde6523f 100644 --- a/test/e2e/cloud/gcp/ha_master.go +++ b/test/e2e/cloud/gcp/ha_master.go @@ -40,7 +40,7 @@ import ( ) func addMasterReplica(zone string) error { - framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone)) + framework.Logf("Adding a new master replica, zone: %s", zone) _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false") if err != nil { return err @@ -49,7 +49,7 @@ func addMasterReplica(zone string) error { } func removeMasterReplica(zone string) error { - framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone)) + framework.Logf("Removing an existing master replica, zone: %s", zone) _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false") if err != nil { return err @@ -58,7 +58,7 @@ func removeMasterReplica(zone string) error { } func addWorkerNodes(zone string) error { - framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone)) + framework.Logf("Adding worker nodes, zone: %s", zone) _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true") if err != nil { return err @@ -67,7 +67,7 @@ func addWorkerNodes(zone string) error { } func removeWorkerNodes(zone string) error { - framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone)) + framework.Logf("Removing worker nodes, zone: %s", zone) _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true") if err != nil { return err diff --git a/test/e2e/framework/debug/resource_usage_gatherer.go b/test/e2e/framework/debug/resource_usage_gatherer.go index 9c6537ed9d4..7e6875b49bd 100644 --- a/test/e2e/framework/debug/resource_usage_gatherer.go +++ b/test/e2e/framework/debug/resource_usage_gatherer.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "math" "regexp" @@ -595,7 +596,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai } } if len(violatedConstraints) > 0 { - return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n")) + return &summary, errors.New(strings.Join(violatedConstraints, "\n")) } return &summary, nil } diff --git a/test/e2e/framework/flake_reporting_util.go b/test/e2e/framework/flake_reporting_util.go index 36d9baa98f0..103345fb6f2 100644 --- a/test/e2e/framework/flake_reporting_util.go +++ 
b/test/e2e/framework/flake_reporting_util.go @@ -57,7 +57,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter if desc != "" { msg = fmt.Sprintf("%v (Description: %v)", msg, desc) } - Logf(msg) + Logf("%s", msg) f.lock.Lock() defer f.lock.Unlock() f.Flakes = append(f.Flakes, msg) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index a71d46c7b1d..ff08e25b4d3 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -311,7 +311,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) { switch TestContext.OutputPrintType { case "hr": if TestContext.ReportDir == "" { - Logf(summaries[i].PrintHumanReadable()) + Logf("%s", summaries[i].PrintHumanReadable()) } else { // TODO: learn to extract test name and append it to the kind instead of timestamp. filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt") @@ -393,7 +393,7 @@ func (f *Framework) AfterEach(ctx context.Context) { for namespaceKey, namespaceErr := range nsDeletionErrors { messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr)) } - Failf(strings.Join(messages, ",")) + Fail(strings.Join(messages, ",")) } }() diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 8c8d5588ccf..29b69af98e5 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -560,7 +560,7 @@ func DescribeIng(ns string) { framework.Logf("\nOutput of kubectl describe ing:\n") desc, _ := e2ekubectl.RunKubectl( ns, "describe", "ing") - framework.Logf(desc) + framework.Logf("%s", desc) } // Update retrieves the ingress, performs the passed function, and then updates it. 
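The e2e changes in this area keep the printf-style helpers but pin the format to "%s", since kubectl output and describe dumps may legally contain '%'. The standard library's testing package has the same print/printf split, so a small illustrative sketch (not tied to the e2e framework) covers both fixes:

package demo

import "testing"

// logRaw shows both accepted fixes for logging dynamic text that may
// contain '%': use the print-style helper, or pin the format string.
func logRaw(t *testing.T, out string) {
	// t.Logf(out) // vet: non-constant format string
	t.Log(out)        // print-style: no format parsing
	t.Logf("%s", out) // pinned format: out is printed verbatim
}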
@@ -829,7 +829,7 @@ func (j *TestJig) VerifyURL(ctx context.Context, route, host string, iterations for i := 0; i < iterations; i++ { b, err := SimpleGET(ctx, httpClient, route, host) if err != nil { - framework.Logf(b) + framework.Logf("%s", b) return err } j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval) diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index 2e8fb196c84..a2febeab5f8 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -255,7 +255,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name) desc, _ := e2ekubectl.RunKubectl( e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace)) - framework.Logf(desc) + framework.Logf("%s", desc) } } @@ -554,12 +554,12 @@ func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd stri stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd) if err != nil { msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err) - framework.Logf(msg) + framework.Logf("%s", msg) return false, nil } if !strings.Contains(stdout, expected) { msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected) - framework.Logf(msg) + framework.Logf("%s", msg) return false, nil } return true, nil diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go index a57473bab83..a7750399ca1 100644 --- a/test/e2e/framework/node/resource.go +++ b/test/e2e/framework/node/resource.go @@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT conditionType, node.Name, cond.Status == v1.ConditionTrue, taints) } if !silent { - framework.Logf(msg) + framework.Logf("%s", msg) } return false } @@ -822,6 +822,6 @@ func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName // TODO use wrapper methods in expect.go after removing core e2e dependency on node gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred()) if taintExists(nodeUpdated.Spec.Taints, taint) { - framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName) + framework.Fail("Failed removing taint " + taint.ToString() + " of the node " + nodeName) } } diff --git a/test/e2e/framework/skipper/skipper.go b/test/e2e/framework/skipper/skipper.go index 7d3b3d6b3de..955ff260703 100644 --- a/test/e2e/framework/skipper/skipper.go +++ b/test/e2e/framework/skipper/skipper.go @@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) { panic("unreachable") } +// Skip is an alias for ginkgo.Skip. +var Skip = ginkgo.Skip + // SkipUnlessAtLeast skips if the value is less than the minValue. 
diff --git a/test/e2e/framework/skipper/skipper.go b/test/e2e/framework/skipper/skipper.go
index 7d3b3d6b3de..955ff260703 100644
--- a/test/e2e/framework/skipper/skipper.go
+++ b/test/e2e/framework/skipper/skipper.go
@@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) {
 	panic("unreachable")
 }
 
+// Skip is an alias for ginkgo.Skip.
+var Skip = ginkgo.Skip
+
 // SkipUnlessAtLeast skips if the value is less than the minValue.
 func SkipUnlessAtLeast(value int, minValue int, message string) {
 	if value < minValue {
-		skipInternalf(1, message)
+		skipInternalf(1, "%s", message)
 	}
 }
diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go
index 2f18d6e333e..4d8e218381c 100644
--- a/test/e2e/framework/statefulset/rest.go
+++ b/test/e2e/framework/statefulset/rest.go
@@ -50,11 +50,11 @@ func CreateStatefulSet(ctx context.Context, c clientset.Interface, manifestPath,
 	svc, err := e2emanifest.SvcFromManifest(mkpath("service.yaml"))
 	framework.ExpectNoError(err)
 
-	framework.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+	framework.Logf("creating %s service", ss.Name)
 	_, err = c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 
-	framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+	framework.Logf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)
 	_, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index b4e42ece191..6adb6fb2196 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -221,7 +221,7 @@ func assertCleanup(ns string, selectors ...string) {
 	}
 	err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
 	if err != nil {
-		framework.Failf(e.Error())
+		framework.Fail(e.Error())
 	}
 }
 
@@ -396,7 +396,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			})
 			ginkgo.By("creating all guestbook components")
 			forEachGBFile(func(contents string) {
-				framework.Logf(contents)
+				framework.Logf("%s", contents)
 				e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
 			})
 
@@ -1630,7 +1630,7 @@ metadata:
 			ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
 			output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
 			if !strings.Contains(output, labelValue) {
-				framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
+				framework.Fail("Failed updating label " + labelName + " to the pod " + pausePodName)
 			}
 
 			ginkgo.By("removing the label " + labelName + " of a pod")
@@ -1638,7 +1638,7 @@ metadata:
 			ginkgo.By("verifying the pod doesn't have the label " + labelName)
 			output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
 			if strings.Contains(output, labelValue) {
-				framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
+				framework.Fail("Failed removing label " + labelName + " of the pod " + pausePodName)
 			}
 		})
 	})
@@ -1915,7 +1915,7 @@ metadata:
 			ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
 			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
 			if strings.Contains(output, testTaint.Key) {
-				framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
+				framework.Fail("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
 			}
 		})
 
@@ -1983,7 +1983,7 @@ metadata:
 			ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
 			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
 			if strings.Contains(output, testTaint.Key) {
-				framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
+				framework.Fail("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
 			}
 		})
 	})
@@ -2330,7 +2330,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test"
 func readReplicationControllerFromString(contents string) *v1.ReplicationController {
 	rc := v1.ReplicationController{}
 	if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
-		framework.Failf(err.Error())
+		framework.Fail(err.Error())
 	}
 
 	return &rc
diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go
index 9d64c5aae39..951e243b0d9 100644
--- a/test/e2e/network/dns_common.go
+++ b/test/e2e/network/dns_common.go
@@ -18,6 +18,7 @@ package network
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"regexp"
 	"strings"
@@ -123,7 +124,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
 	case "cluster-dns-ipv6":
 		cmd = append(cmd, "AAAA")
 	default:
-		panic(fmt.Errorf("invalid target: " + target))
+		panic(errors.New("invalid target: " + target))
 	}
 	cmd = append(cmd, dnsName)
 
diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go
index 897bfb87a15..584ef0a801f 100644
--- a/test/e2e/network/proxy.go
+++ b/test/e2e/network/proxy.go
@@ -271,7 +271,7 @@ var _ = common.SIGDescribe("Proxy", func() {
 				framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
 			}
 
-			framework.Failf(strings.Join(errs, "\n"))
+			framework.Fail(strings.Join(errs, "\n"))
 		}
 	})
 
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index 2f23756e6d9..8b6fd4ebcfb 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -263,7 +263,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b
 
 func checkAffinityFailed(tracker affinityTracker, err string) {
 	framework.Logf("%v", tracker.hostTrace)
-	framework.Failf(err)
+	framework.Fail(err)
 }
 
 // StartServeHostnameService creates a replication controller that serves its
diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go
index b9053f6a2ed..5c1d9738367 100644
--- a/test/e2e/network/service_latency.go
+++ b/test/e2e/network/service_latency.go
@@ -102,7 +102,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
 		}
 		if n < 2 {
 			failing.Insert("Less than two runs succeeded; aborting.")
-			framework.Failf(strings.Join(failing.List(), "\n"))
+			framework.Fail(strings.Join(failing.List(), "\n"))
 		}
 		percentile := func(p int) time.Duration {
 			est := n * p / 100
@@ -129,7 +129,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
 		if failing.Len() > 0 {
 			errList := strings.Join(failing.List(), "\n")
 			helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
-			framework.Failf(errList + helpfulInfo)
+			framework.Fail(errList + helpfulInfo)
 		}
 	})
 })
diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go
index 2751b102f55..c5b2a9b5b8a 100644
--- a/test/e2e/network/util.go
+++ b/test/e2e/network/util.go
@@ -81,7 +81,7 @@ func DescribeSvc(ns string) {
 	framework.Logf("\nOutput of kubectl describe svc:\n")
 	desc, _ := e2ekubectl.RunKubectl(
 		ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
-	framework.Logf(desc)
+	framework.Logf("%s", desc)
 }
 
 // CheckSCTPModuleLoadedOnNodes checks whether any node on the list has the
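Note: where a message is assembled by concatenation and needs no verbs at all, the series goes one step further and swaps fmt.Errorf for errors.New, as in dns_common.go above. A standalone sketch, not code from this patch; target is an illustrative value:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		target := "pool-42%-reserved"
		// fmt.Errorf("invalid target: " + target) is flagged by vet and would
		// misparse the "%"; errors.New uses the string verbatim.
		err := errors.New("invalid target: " + target)
		fmt.Println(err)
	}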
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index cbe84e4720f..47c5fec6d1d 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -297,7 +297,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		lrNamespace, err := f.CreateNamespace(ctx, lrName, nil)
 		framework.ExpectNoError(err, "failed creating Namespace")
 		framework.Logf("Namespace %q created", lrNamespace.ObjectMeta.Name)
 
-		framework.Logf(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name))
+		framework.Logf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name)
 		_, err = f.ClientSet.CoreV1().LimitRanges(lrNamespace.ObjectMeta.Name).Create(ctx, limitRange2, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create limitRange %q in %q namespace", lrName, lrNamespace.ObjectMeta.Name)
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go
index 33700104c25..42b13159a2d 100644
--- a/test/e2e/storage/drivers/csi-test/mock/service/controller.go
+++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go
@@ -112,7 +112,7 @@ func (s *service) CreateVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("CreateVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.CreateVolumeResponse{Volume: &v}, nil
@@ -132,7 +132,7 @@ func (s *service) DeleteVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("DeleteVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	// If the volume does not exist then return an idempotent response.
@@ -150,7 +150,7 @@ func (s *service) DeleteVolume(
 	klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId)
 
 	if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 	return &csi.DeleteVolumeResponse{}, nil
 }
@@ -179,7 +179,7 @@ func (s *service) ControllerPublishVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("ControllerPublishVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	s.volsRWL.Lock()
@@ -246,7 +246,7 @@ func (s *service) ControllerPublishVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("ControllerPublishVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.ControllerPublishVolumeResponse{
@@ -280,7 +280,7 @@ func (s *service) ControllerUnpublishVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	s.volsRWL.Lock()
@@ -309,7 +309,7 @@ func (s *service) ControllerUnpublishVolume(
 	s.vols[i] = v
 
 	if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.ControllerUnpublishVolumeResponse{}, nil
@@ -332,7 +332,7 @@ func (s *service) ValidateVolumeCapabilities(
 	}
 
 	if hookVal, hookMsg := s.execHook("ValidateVolumeCapabilities"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.ValidateVolumeCapabilitiesResponse{
@@ -350,7 +350,7 @@ func (s *service) ControllerGetVolume(
 	*csi.ControllerGetVolumeResponse, error) {
 
 	if hookVal, hookMsg := s.execHook("GetVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	resp := &csi.ControllerGetVolumeResponse{
@@ -373,7 +373,7 @@ func (s *service) ControllerGetVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("GetVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return resp, nil
@@ -385,7 +385,7 @@ func (s *service) ListVolumes(
 	*csi.ListVolumesResponse, error) {
 
 	if hookVal, hookMsg := s.execHook("ListVolumesStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	// Copy the mock volumes into a new slice in order to avoid
@@ -464,7 +464,7 @@ func (s *service) ListVolumes(
 	}
 
 	if hookVal, hookMsg := s.execHook("ListVolumesEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.ListVolumesResponse{
@@ -479,7 +479,7 @@ func (s *service) GetCapacity(
 	*csi.GetCapacityResponse, error) {
 
 	if hookVal, hookMsg := s.execHook("GetCapacity"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.GetCapacityResponse{
@@ -493,7 +493,7 @@ func (s *service) ControllerGetCapabilities(
 	*csi.ControllerGetCapabilitiesResponse, error) {
 
 	if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	caps := []*csi.ControllerServiceCapability{
@@ -597,7 +597,7 @@ func (s *service) ControllerGetCapabilities(
 	}
 
 	if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.ControllerGetCapabilitiesResponse{
@@ -630,7 +630,7 @@ func (s *service) CreateSnapshot(ctx context.Context,
 	s.snapshots.Add(snapshot)
 
 	if hookVal, hookMsg := s.execHook("CreateSnapshotEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil
@@ -645,7 +645,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
 	}
 
 	if hookVal, hookMsg := s.execHook("DeleteSnapshotStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	// If the snapshot does not exist then return an idempotent response.
@@ -661,7 +661,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
 	klog.V(5).InfoS("mock delete snapshot", "snapshotId", req.SnapshotId)
 
 	if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return &csi.DeleteSnapshotResponse{}, nil
@@ -671,7 +671,7 @@ func (s *service) ListSnapshots(ctx context.Context,
 	req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
 
 	if hookVal, hookMsg := s.execHook("ListSnapshots"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	// case 1: SnapshotId is not empty, return snapshots that match the snapshot id.
@@ -700,7 +700,7 @@ func (s *service) ControllerExpandVolume(
 	}
 
 	if hookVal, hookMsg := s.execHook("ControllerExpandVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	s.volsRWL.Lock()
@@ -737,7 +737,7 @@ func (s *service) ControllerExpandVolume(
 	s.vols[i] = v
 
 	if hookVal, hookMsg := s.execHook("ControllerExpandVolumeEnd"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
+		return nil, status.Error(hookVal, hookMsg)
 	}
 
 	return resp, nil
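Note: the mock CSI driver changes (controller.go above, node.go next) are all the same substitution. grpc-go provides both constructors: status.Errorf(code, format, args...) formats its arguments, while status.Error(code, msg) takes a finished message. Since hookMsg is already assembled, the non-formatting form is the right one and keeps vet quiet. A standalone sketch, not code from this patch; the message text is illustrative:

	package main

	import (
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		hookMsg := "hook rejected call at 80% capacity"
		// status.Errorf(codes.Aborted, hookMsg) // vet: non-constant format string
		err := status.Error(codes.Aborted, hookMsg) // same gRPC status, message kept verbatim
		fmt.Println(err)
	}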
s.execHook("NodeGetCapabilities"); hookVal != codes.OK { - return nil, status.Errorf(hookVal, hookMsg) + return nil, status.Error(hookVal, hookMsg) } capabilities := []*csi.NodeServiceCapability{ { @@ -395,7 +395,7 @@ func (s *service) NodeGetCapabilities( func (s *service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { if hookVal, hookMsg := s.execHook("NodeGetInfo"); hookVal != codes.OK { - return nil, status.Errorf(hookVal, hookMsg) + return nil, status.Error(hookVal, hookMsg) } csiNodeResponse := &csi.NodeGetInfoResponse{ NodeId: s.nodeID, @@ -442,11 +442,11 @@ func (s *service) NodeGetVolumeStats(ctx context.Context, msg := fmt.Sprintf("volume %q doest not exist on the specified path %q", req.VolumeId, req.VolumePath) resp.VolumeCondition.Abnormal = true resp.VolumeCondition.Message = msg - return resp, status.Errorf(codes.NotFound, msg) + return resp, status.Error(codes.NotFound, msg) } if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsEnd"); hookVal != codes.OK { - return nil, status.Errorf(hookVal, hookMsg) + return nil, status.Error(hookVal, hookMsg) } resp.Usage = []*csi.VolumeUsage{ diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 8c4363f0501..792c29e69ac 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -762,7 +762,7 @@ func ensureTopologyRequirements(ctx context.Context, nodeSelection *e2epod.NodeS nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) framework.ExpectNoError(err) if len(nodes.Items) < minCount { - e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount)) + e2eskipper.Skipf("Number of available nodes is less than %d - skipping", minCount) } topologyKeys := driverInfo.TopologyKeys diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index bb92ca20a9b..c1b6761ca6f 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -18,6 +18,7 @@ package testsuites import ( "context" + "errors" "fmt" "regexp" "strings" @@ -314,7 +315,7 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName cleanupErrors = append(cleanupErrors, fmt.Sprintf("timed out waiting for PVs to be deleted: %s", err)) } if len(cleanupErrors) != 0 { - return fmt.Errorf("test cleanup failed: " + strings.Join(cleanupErrors, "; ")) + return errors.New("test cleanup failed: " + strings.Join(cleanupErrors, "; ")) } return nil } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index bd77e0f76fd..2cacbb87cc9 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -550,7 +550,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound"))) - framework.Logf(err.Error()) + framework.Logf("%s", err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending)) @@ -589,7 +589,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // The claim should timeout phase:Pending err = 
e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound"))) - framework.Logf(err.Error()) + framework.Logf("%s", err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending)) diff --git a/test/e2e/upgrades/apps/cassandra.go b/test/e2e/upgrades/apps/cassandra.go index 2f5aac92c99..53c3fbc0dac 100644 --- a/test/e2e/upgrades/apps/cassandra.go +++ b/test/e2e/upgrades/apps/cassandra.go @@ -19,6 +19,7 @@ package apps import ( "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -131,13 +132,14 @@ func (t *CassandraUpgradeTest) listUsers() ([]string, error) { if err != nil { return nil, err } - return nil, fmt.Errorf(string(b)) + return nil, errors.New(string(b)) } var names []string if err := json.NewDecoder(r.Body).Decode(&names); err != nil { return nil, err } return names, nil + } // addUser adds a user to the db via the tester services. @@ -153,7 +155,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error { if err != nil { return err } - return fmt.Errorf(string(b)) + return errors.New(string(b)) } return nil } diff --git a/test/e2e/upgrades/apps/etcd.go b/test/e2e/upgrades/apps/etcd.go index 774c35bd048..f218607ddbe 100644 --- a/test/e2e/upgrades/apps/etcd.go +++ b/test/e2e/upgrades/apps/etcd.go @@ -19,6 +19,7 @@ package apps import ( "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -125,7 +126,7 @@ func (t *EtcdUpgradeTest) listUsers() ([]string, error) { if err != nil { return nil, err } - return nil, fmt.Errorf(string(b)) + return nil, errors.New(string(b)) } var names []string if err := json.NewDecoder(r.Body).Decode(&names); err != nil { @@ -146,7 +147,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error { if err != nil { return err } - return fmt.Errorf(string(b)) + return errors.New(string(b)) } return nil } diff --git a/test/e2e/upgrades/apps/mysql.go b/test/e2e/upgrades/apps/mysql.go index 1a16931bacc..1f555b63783 100644 --- a/test/e2e/upgrades/apps/mysql.go +++ b/test/e2e/upgrades/apps/mysql.go @@ -19,6 +19,7 @@ package apps import ( "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -194,7 +195,7 @@ func (t *MySQLUpgradeTest) addName(name string) error { if err != nil { return err } - return fmt.Errorf(string(b)) + return errors.New(string(b)) } return nil } @@ -212,7 +213,7 @@ func (t *MySQLUpgradeTest) countNames() (int, error) { if err != nil { return 0, err } - return 0, fmt.Errorf(string(b)) + return 0, errors.New(string(b)) } var count int if err := json.NewDecoder(r.Body).Decode(&count); err != nil { diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index eacd693e16d..cc8e0a796b8 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -112,19 +112,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew ginkgo.By("deploying the GMSA webhook") err := deployGmsaWebhook(ctx, f) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating the GMSA custom resource") err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating an RBAC role to grant use access to that GMSA resource") rbacRoleName, err := 
createRBACRoleForGmsa(ctx, f) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating a service account") @@ -179,19 +179,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew ginkgo.By("deploying the GMSA webhook") err := deployGmsaWebhook(ctx, f) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating the GMSA custom resource") err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating an RBAC role to grant use access to that GMSA resource") rbacRoleName, err := createRBACRoleForGmsa(ctx, f) if err != nil { - framework.Failf(err.Error()) + framework.Fail(err.Error()) } ginkgo.By("creating a service account") diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go index 7d0d8c30248..d4f842e44ea 100644 --- a/test/e2e_node/system_node_critical_test.go +++ b/test/e2e_node/system_node_critical_test.go @@ -18,6 +18,7 @@ package e2enode import ( "context" + "errors" "fmt" "os" "time" @@ -90,8 +91,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit return nil } msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure) - framework.Logf(msg) - return fmt.Errorf(msg) + return errors.New(msg) }, time.Minute*2, time.Second*4).Should(gomega.Succeed()) ginkgo.By("check if it's running all the time") @@ -100,7 +100,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit if err == nil { framework.Logf("mirror pod %q is running", mirrorPodName) } else { - framework.Logf(err.Error()) + framework.Logf("%s", err.Error()) } return err }, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred()) diff --git a/test/e2e_node/util_sriov.go b/test/e2e_node/util_sriov.go index 569315bac53..1840d179ce1 100644 --- a/test/e2e_node/util_sriov.go +++ b/test/e2e_node/util_sriov.go @@ -42,8 +42,8 @@ func requireSRIOVDevices() { msg := "this test is meant to run on a system with at least one configured VF from SRIOV device" if framework.TestContext.RequireDevices { - framework.Failf(msg) + framework.Fail(msg) } else { - e2eskipper.Skipf(msg) + e2eskipper.Skip(msg) } } diff --git a/test/images/agnhost/crd-conversion-webhook/converter/framework.go b/test/images/agnhost/crd-conversion-webhook/converter/framework.go index c16a684f0d3..cdf12841f0e 100644 --- a/test/images/agnhost/crd-conversion-webhook/converter/framework.go +++ b/test/images/agnhost/crd-conversion-webhook/converter/framework.go @@ -127,7 +127,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) { serializer := getInputSerializer(contentType) if serializer == nil { msg := fmt.Sprintf("invalid Content-Type header `%s`", contentType) - klog.Errorf(msg) + klog.Error(msg) http.Error(w, msg, http.StatusBadRequest) return } @@ -147,7 +147,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) { convertReview, ok := obj.(*v1beta1.ConversionReview) if !ok { msg := fmt.Sprintf("Expected v1beta1.ConversionReview but got: %T", obj) - klog.Errorf(msg) + klog.Error(msg) http.Error(w, msg, http.StatusBadRequest) return } @@ -161,7 +161,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) { convertReview, ok := obj.(*v1.ConversionReview) if !ok { msg := fmt.Sprintf("Expected v1.ConversionReview but got: %T", obj) - klog.Errorf(msg) + klog.Error(msg) 
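Note: framework.Failf and e2eskipper.Skipf are printf-style, so call sites with a prebuilt message move to the plain variants instead; the Skip alias added to skipper.go earlier in this series exists precisely so sites like util_sriov.go have a non-formatting entry point. A standalone sketch, not code from this patch; fail and failf are stand-ins for the framework helpers:

	package main

	import "fmt"

	func fail(msg string)                       { fmt.Println("FAIL:", msg) }
	func failf(format string, a ...interface{}) { fmt.Printf("FAIL: "+format+"\n", a...) }

	func main() {
		msg := "expected at least one VF, found 0 (0% configured)"
		fail(msg)        // preferred: the message is already complete
		failf("%s", msg) // also fine, but routes through fmt for no benefit
		// failf(msg)    // vet: non-constant format string in call to main.failf
	}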
diff --git a/test/images/agnhost/crd-conversion-webhook/converter/framework.go b/test/images/agnhost/crd-conversion-webhook/converter/framework.go
index c16a684f0d3..cdf12841f0e 100644
--- a/test/images/agnhost/crd-conversion-webhook/converter/framework.go
+++ b/test/images/agnhost/crd-conversion-webhook/converter/framework.go
@@ -127,7 +127,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
 	serializer := getInputSerializer(contentType)
 	if serializer == nil {
 		msg := fmt.Sprintf("invalid Content-Type header `%s`", contentType)
-		klog.Errorf(msg)
+		klog.Error(msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
@@ -147,7 +147,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
 		convertReview, ok := obj.(*v1beta1.ConversionReview)
 		if !ok {
 			msg := fmt.Sprintf("Expected v1beta1.ConversionReview but got: %T", obj)
-			klog.Errorf(msg)
+			klog.Error(msg)
 			http.Error(w, msg, http.StatusBadRequest)
 			return
 		}
@@ -161,7 +161,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
 		convertReview, ok := obj.(*v1.ConversionReview)
 		if !ok {
 			msg := fmt.Sprintf("Expected v1.ConversionReview but got: %T", obj)
-			klog.Errorf(msg)
+			klog.Error(msg)
 			http.Error(w, msg, http.StatusBadRequest)
 			return
 		}
@@ -182,7 +182,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
 	outSerializer := getOutputSerializer(accept)
 	if outSerializer == nil {
 		msg := fmt.Sprintf("invalid accept header `%s`", accept)
-		klog.Errorf(msg)
+		klog.Error(msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
diff --git a/test/images/agnhost/net/main.go b/test/images/agnhost/net/main.go
index 61bded22a7e..6cf86ed80fe 100644
--- a/test/images/agnhost/net/main.go
+++ b/test/images/agnhost/net/main.go
@@ -178,5 +178,5 @@ func handleRunRequest(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	fmt.Fprintf(w, "ok\noutput:\n\n"+output.b.String())
+	fmt.Fprint(w, "ok\noutput:\n\n"+output.b.String())
 }
diff --git a/test/images/agnhost/netexec/netexec.go b/test/images/agnhost/netexec/netexec.go
index f1e2e62d536..e59d831795b 100644
--- a/test/images/agnhost/netexec/netexec.go
+++ b/test/images/agnhost/netexec/netexec.go
@@ -285,7 +285,7 @@ func echoHandler(w http.ResponseWriter, r *http.Request) {
 func clientIPHandler(w http.ResponseWriter, r *http.Request) {
 	log.Printf("GET /clientip")
-	fmt.Fprintf(w, r.RemoteAddr)
+	fmt.Fprint(w, r.RemoteAddr)
 }
 
 func headerHandler(w http.ResponseWriter, r *http.Request) {
 	key := r.FormValue("key")
diff --git a/test/instrumentation/decode_metric.go b/test/instrumentation/decode_metric.go
index 919d0f2d142..3dccb7bc603 100644
--- a/test/instrumentation/decode_metric.go
+++ b/test/instrumentation/decode_metric.go
@@ -345,7 +345,7 @@ func (c *metricDecoder) decodeOpts(expr ast.Expr) (metric, error) {
 			var err error
 			s, err := c.decodeString(kv.Value)
 			if err != nil {
-				return m, newDecodeErrorf(expr, err.Error())
+				return m, newDecodeErrorf(expr, "%s", err.Error())
 			}
 			value = *s
 			switch key {
@@ -771,7 +771,7 @@ func (c *metricDecoder) decodeBuildFQNameArguments(fc *ast.CallExpr) (string, st
 	for i, elt := range fc.Args {
 		s, err := c.decodeString(elt)
 		if err != nil || s == nil {
-			return "", "", "", newDecodeErrorf(fc, err.Error())
+			return "", "", "", newDecodeErrorf(fc, "%s", err.Error())
 		}
 		strArgs[i] = *s
 	}
diff --git a/test/integration/dualstack/dualstack_test.go b/test/integration/dualstack/dualstack_test.go
index 36e007a46d0..01ab3323cd9 100644
--- a/test/integration/dualstack/dualstack_test.go
+++ b/test/integration/dualstack/dualstack_test.go
@@ -18,6 +18,7 @@ package dualstack
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -1384,7 +1385,7 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1.
 
 	if len(errstrings) > 0 {
 		errstrings = append(errstrings, fmt.Sprintf("Error validating Service: %s, ClusterIPs: %v Expected IPFamilies %v", svc.Name, svc.Spec.ClusterIPs, expectedIPFamilies))
-		return fmt.Errorf(strings.Join(errstrings, "\n"))
+		return errors.New(strings.Join(errstrings, "\n"))
 	}
 
 	return nil
diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go
index 5e253a6c6ca..ac792a55323 100644
--- a/test/integration/framework/util.go
+++ b/test/integration/framework/util.go
@@ -158,7 +158,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
 			conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 	}
 	if !silent {
-		klog.Infof(msg)
+		klog.Info(msg)
 	}
 	return false
 }
diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go
index 0c00b2dad39..8d1dc08193f 100644
--- a/test/integration/garbagecollector/garbage_collector_test.go
+++ b/test/integration/garbagecollector/garbage_collector_test.go
@@ -697,7 +697,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	wg.Wait()
 	close(errs)
 	for errString := range errs {
-		t.Fatalf(errString)
+		t.Fatal(errString)
 	}
 	t.Logf("all pods are created, all replications controllers are created then deleted")
 	// wait for the RCs and Pods to reach the expected numbers.
diff --git a/test/integration/servicecidr/allocator_test.go b/test/integration/servicecidr/allocator_test.go
index 1196a98daca..5973840bb08 100644
--- a/test/integration/servicecidr/allocator_test.go
+++ b/test/integration/servicecidr/allocator_test.go
@@ -68,7 +68,7 @@ func TestServiceAllocation(t *testing.T) {
 		},
 	}
 	for _, tc := range testcases {
-		t.Run(fmt.Sprintf(tc.name), func(t *testing.T) {
+		t.Run(tc.name, func(t *testing.T) {
 			etcdOptions := framework.SharedEtcd()
 			apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
 			s1 := kubeapiservertesting.StartTestServerOrDie(t,
diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go
index 23917ad9f3d..e0747c48948 100644
--- a/test/utils/density_utils.go
+++ b/test/utils/density_utils.go
@@ -18,6 +18,7 @@ package utils
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -99,7 +100,7 @@ func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []str
 	}
 	for _, labelKey := range labelKeys {
 		if node.Labels != nil && len(node.Labels[labelKey]) != 0 {
-			return fmt.Errorf("Failed removing label " + labelKey + " of the node " + nodeName)
+			return errors.New("Failed removing label " + labelKey + " of the node " + nodeName)
 		}
 	}
 	return nil
diff --git a/test/utils/deployment.go b/test/utils/deployment.go
index a8876d799fc..eaa618a6cee 100644
--- a/test/utils/deployment.go
+++ b/test/utils/deployment.go
@@ -18,6 +18,7 @@ package utils
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"
 
@@ -226,7 +227,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 	})
 	if wait.Interrupted(err) {
 		LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
-		err = fmt.Errorf(reason)
+		err = errors.New(reason)
 	}
 	if newRS == nil {
 		return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
diff --git a/test/utils/runners.go b/test/utils/runners.go
index eccc78ac49e..35c002950c7 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -664,7 +664,7 @@ func (config *RCConfig) start(ctx context.Context) error {
 			*config.CreatedPods = startupStatus.Created
 		}
 		if !config.Silent {
-			config.RCConfigLog(startupStatus.String(config.Name))
+			config.RCConfigLog("%s", startupStatus.String(config.Name))
 		}
 
 		if config.PodStatusFile != nil {
@@ -688,8 +688,8 @@ func (config *RCConfig) start(ctx context.Context) error {
 
 		if podDeletionsCount > config.MaxAllowedPodDeletions {
 			// Number of pods which disappeared is over threshold
			err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
-			config.RCConfigLog(err.Error())
-			config.RCConfigLog(diff.String(sets.NewString()))
+			config.RCConfigLog("%s", err.Error())
+			config.RCConfigLog("%s", diff.String(sets.NewString()))
 			return err
 		}
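Note: the same rule closes out the series for writers. The agnhost handlers above switch fmt.Fprintf to fmt.Fprint when the body is fully assembled, so response bytes that happen to contain "%" are written untouched. A standalone sketch, not code from this patch, with os.Stdout standing in for the http.ResponseWriter:

	package main

	import (
		"fmt"
		"os"
	)

	func main() {
		body := "ok\noutput:\n\n100% complete"
		// fmt.Fprintf(os.Stdout, body) // vet: non-constant format string;
		// the "%" would be parsed as a formatting verb and mangle the output.
		fmt.Fprint(os.Stdout, body) // writes the payload as-is
	}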