Fix Go vet errors for master golang
Co-authored-by: Rajalakshmi-Girish <rajalakshmi.girish1@ibm.com>
Co-authored-by: Abhishek Kr Srivastav <Abhishek.kr.srivastav@ibm.com>
committed by Kishen Viswanathan
parent 7164c728c0
commit 9d10ddb060
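Every hunk below is a variation on one theme: newer Go toolchains teach `go vet`'s printf check to reject a non-constant format string passed to a printf-style function (`framework.Logf`, `framework.Failf`, `fmt.Errorf`, `status.Errorf`, `t.Fatalf`, and friends). The fix is always one of two moves: pin the format to a constant (`"%s"`), or switch to the non-formatting variant (`framework.Fail`, `errors.New`, `status.Error`, `t.Fatal`, `fmt.Fprint`). The following minimal sketch is not part of the commit; it reproduces the diagnostic and both fixes using only standard-library functions:

// Minimal sketch (not from this commit): what a recent `go vet` flags and
// how this commit fixes it. vet reports "non-constant format string in
// call to ..." for the first two calls below.
package main

import (
	"errors"
	"fmt"
	"log"
)

func main() {
	msg := "progress: 100% done" // a stray '%' here would be parsed as a verb

	log.Printf(msg)     // vet error: non-constant format string
	_ = fmt.Errorf(msg) // vet error: non-constant format string

	log.Printf("%s", msg) // fix 1: constant "%s" format string
	log.Print(msg)        // fix 2: non-formatting variant
	_ = errors.New(msg)   // fix 2 applied to error construction
}

Calls like `status.Errorf(hookVal, hookMsg)` fall into the same bucket: `hookMsg` is not a constant format string, so the non-formatting `status.Error` is the right replacement.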
@@ -553,13 +553,13 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient
     locatedWardle := false
     for _, item := range list.Items {
         if item.Name == apiServiceName {
-            framework.Logf("Found " + apiServiceName + " in APIServiceList")
+            framework.Logf("Found %s in APIServiceList", apiServiceName)
             locatedWardle = true
             break
         }
     }
     if !locatedWardle {
-        framework.Failf("Unable to find " + apiServiceName + " in APIServiceList")
+        framework.Failf("Unable to find %s in APIServiceList", apiServiceName)
     }

     // As the APIService doesn't have any labels currently set we need to
@@ -773,7 +773,7 @@ func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err
             msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
         }

-        framework.Failf(msg)
+        framework.Fail(msg)
     }
 }
@@ -262,7 +262,7 @@ func gatherMetrics(ctx context.Context, f *framework.Framework) {
             framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
         } else {
             summary = (*e2emetrics.ComponentCollection)(&received)
-            framework.Logf(summary.PrintHumanReadable())
+            framework.Logf("%s", summary.PrintHumanReadable())
         }
     }
 }
@@ -2012,7 +2012,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
     name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
     for k, v := range kv {
         cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
-        framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
+        framework.Logf("%s", e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
     }
 }
@@ -2047,7 +2047,7 @@ func (m *mysqlGaleraTester) deploy(ctx context.Context, ns string) *appsv1.State
         "create database statefulset;",
         "use statefulset; create table foo (k varchar(20), v varchar(20));",
     } {
-        framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
+        framework.Logf("%s", m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
     }
     return m.ss
 }
@@ -2056,7 +2056,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
     name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
     for k, v := range kv {
         cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
-        framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
+        framework.Logf("%s", m.mysqlExec(cmd, m.ss.Namespace, name))
     }
 }
@@ -2087,7 +2087,7 @@ func (m *redisTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet
 func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
     name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
     for k, v := range kv {
-        framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
+        framework.Logf("%s", m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
     }
 }
@@ -2117,7 +2117,7 @@ func (c *cockroachDBTester) deploy(ctx context.Context, ns string) *appsv1.State
         "CREATE DATABASE IF NOT EXISTS foo;",
         "CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
     } {
-        framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
+        framework.Logf("%s", c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
     }
     return c.ss
 }
@@ -2126,7 +2126,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
     name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
     for k, v := range kv {
         cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
-        framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
+        framework.Logf("%s", c.cockroachDBExec(cmd, c.ss.Namespace, name))
     }
 }
 func (c *cockroachDBTester) read(statefulPodIndex int, key string) string {
@@ -110,7 +110,7 @@ func traceRouteToControlPlane() {
     cmd := exec.Command(traceroute, "-I", framework.APIAddress())
     out, err := cmd.Output()
     if len(out) != 0 {
-        framework.Logf(string(out))
+        framework.Logf("%s", string(out))
     }
     if exiterr, ok := err.(*exec.ExitError); err != nil && ok {
         framework.Logf("Error while running traceroute: %s", exiterr.Stderr)
@@ -40,7 +40,7 @@ import (
 )

 func addMasterReplica(zone string) error {
-    framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+    framework.Logf("Adding a new master replica, zone: %s", zone)
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
     if err != nil {
         return err

@@ -49,7 +49,7 @@ func addMasterReplica(zone string) error {
 }

 func removeMasterReplica(zone string) error {
-    framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+    framework.Logf("Removing an existing master replica, zone: %s", zone)
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
     if err != nil {
         return err

@@ -58,7 +58,7 @@ func removeMasterReplica(zone string) error {
 }

 func addWorkerNodes(zone string) error {
-    framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+    framework.Logf("Adding worker nodes, zone: %s", zone)
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
     if err != nil {
         return err

@@ -67,7 +67,7 @@ func addWorkerNodes(zone string) error {
 }

 func removeWorkerNodes(zone string) error {
-    framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+    framework.Logf("Removing worker nodes, zone: %s", zone)
     _, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
     if err != nil {
         return err
@@ -21,6 +21,7 @@ import (
     "bytes"
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "math"
     "regexp"
@@ -595,7 +596,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
         }
     }
     if len(violatedConstraints) > 0 {
-        return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n"))
+        return &summary, errors.New(strings.Join(violatedConstraints, "\n"))
     }
     return &summary, nil
 }
@@ -57,7 +57,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
     if desc != "" {
         msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
     }
-    Logf(msg)
+    Logf("%s", msg)
     f.lock.Lock()
     defer f.lock.Unlock()
     f.Flakes = append(f.Flakes, msg)
@@ -311,7 +311,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
     switch TestContext.OutputPrintType {
     case "hr":
         if TestContext.ReportDir == "" {
-            Logf(summaries[i].PrintHumanReadable())
+            Logf("%s", summaries[i].PrintHumanReadable())
         } else {
             // TODO: learn to extract test name and append it to the kind instead of timestamp.
             filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
@@ -393,7 +393,7 @@ func (f *Framework) AfterEach(ctx context.Context) {
         for namespaceKey, namespaceErr := range nsDeletionErrors {
             messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
         }
-        Failf(strings.Join(messages, ","))
+        Fail(strings.Join(messages, ","))
     }
 }()
@@ -560,7 +560,7 @@ func DescribeIng(ns string) {
     framework.Logf("\nOutput of kubectl describe ing:\n")
     desc, _ := e2ekubectl.RunKubectl(
         ns, "describe", "ing")
-    framework.Logf(desc)
+    framework.Logf("%s", desc)
 }

 // Update retrieves the ingress, performs the passed function, and then updates it.
@@ -829,7 +829,7 @@ func (j *TestJig) VerifyURL(ctx context.Context, route, host string, iterations
     for i := 0; i < iterations; i++ {
         b, err := SimpleGET(ctx, httpClient, route, host)
         if err != nil {
-            framework.Logf(b)
+            framework.Logf("%s", b)
             return err
         }
         j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
@@ -255,7 +255,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
         framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
         desc, _ := e2ekubectl.RunKubectl(
             e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
-        framework.Logf(desc)
+        framework.Logf("%s", desc)
     }
 }
@@ -554,12 +554,12 @@ func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd stri
     stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
     if err != nil {
         msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
-        framework.Logf(msg)
+        framework.Logf("%s", msg)
         return false, nil
     }
     if !strings.Contains(stdout, expected) {
         msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
-        framework.Logf(msg)
+        framework.Logf("%s", msg)
         return false, nil
     }
     return true, nil
@@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
             conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
     }
     if !silent {
-        framework.Logf(msg)
+        framework.Logf("%s", msg)
     }
     return false
 }
@@ -822,6 +822,6 @@ func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName
     // TODO use wrapper methods in expect.go after removing core e2e dependency on node
     gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
     if taintExists(nodeUpdated.Spec.Taints, taint) {
-        framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
+        framework.Fail("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
     }
 }
@@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) {
     panic("unreachable")
 }

+// Skip is an alias for ginkgo.Skip.
+var Skip = ginkgo.Skip
+
 // SkipUnlessAtLeast skips if the value is less than the minValue.
 func SkipUnlessAtLeast(value int, minValue int, message string) {
     if value < minValue {
-        skipInternalf(1, message)
+        skipInternalf(1, "%s", message)
     }
 }
@@ -50,11 +50,11 @@ func CreateStatefulSet(ctx context.Context, c clientset.Interface, manifestPath,
     svc, err := e2emanifest.SvcFromManifest(mkpath("service.yaml"))
     framework.ExpectNoError(err)

-    framework.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+    framework.Logf("creating %s service", ss.Name)
     _, err = c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{})
     framework.ExpectNoError(err)

-    framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+    framework.Logf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)
     _, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
     framework.ExpectNoError(err)
     WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
@@ -221,7 +221,7 @@ func assertCleanup(ns string, selectors ...string) {
     }
     err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
     if err != nil {
-        framework.Failf(e.Error())
+        framework.Fail(e.Error())
     }
 }
@@ -396,7 +396,7 @@ var _ = SIGDescribe("Kubectl client", func() {
     })
     ginkgo.By("creating all guestbook components")
     forEachGBFile(func(contents string) {
-        framework.Logf(contents)
+        framework.Logf("%s", contents)
         e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
     })
@@ -1630,7 +1630,7 @@ metadata:
     ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
     output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
     if !strings.Contains(output, labelValue) {
-        framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
+        framework.Fail("Failed updating label " + labelName + " to the pod " + pausePodName)
     }

     ginkgo.By("removing the label " + labelName + " of a pod")
@@ -1638,7 +1638,7 @@ metadata:
     ginkgo.By("verifying the pod doesn't have the label " + labelName)
     output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
     if strings.Contains(output, labelValue) {
-        framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
+        framework.Fail("Failed removing label " + labelName + " of the pod " + pausePodName)
     }
 })
})
@@ -1915,7 +1915,7 @@ metadata:
     ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
     output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
     if strings.Contains(output, testTaint.Key) {
-        framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
+        framework.Fail("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
     }
 })
@@ -1983,7 +1983,7 @@ metadata:
     ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
     output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
     if strings.Contains(output, testTaint.Key) {
-        framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
+        framework.Fail("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
     }
 })
})
@@ -2330,7 +2330,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test"
 func readReplicationControllerFromString(contents string) *v1.ReplicationController {
     rc := v1.ReplicationController{}
     if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     return &rc
@@ -18,6 +18,7 @@ package network

 import (
     "context"
+    "errors"
     "fmt"
     "regexp"
     "strings"
@@ -123,7 +124,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
     case "cluster-dns-ipv6":
         cmd = append(cmd, "AAAA")
     default:
-        panic(fmt.Errorf("invalid target: " + target))
+        panic(errors.New("invalid target: " + target))
     }
     cmd = append(cmd, dnsName)
@@ -271,7 +271,7 @@ var _ = common.SIGDescribe("Proxy", func() {
         framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
     }

-    framework.Failf(strings.Join(errs, "\n"))
+    framework.Fail(strings.Join(errs, "\n"))
 }
})
@@ -263,7 +263,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b

 func checkAffinityFailed(tracker affinityTracker, err string) {
     framework.Logf("%v", tracker.hostTrace)
-    framework.Failf(err)
+    framework.Fail(err)
 }

 // StartServeHostnameService creates a replication controller that serves its
@@ -102,7 +102,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
     }
     if n < 2 {
         failing.Insert("Less than two runs succeeded; aborting.")
-        framework.Failf(strings.Join(failing.List(), "\n"))
+        framework.Fail(strings.Join(failing.List(), "\n"))
     }
     percentile := func(p int) time.Duration {
         est := n * p / 100
@@ -129,7 +129,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
     if failing.Len() > 0 {
         errList := strings.Join(failing.List(), "\n")
         helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
-        framework.Failf(errList + helpfulInfo)
+        framework.Fail(errList + helpfulInfo)
     }
 })
})
@@ -81,7 +81,7 @@ func DescribeSvc(ns string) {
     framework.Logf("\nOutput of kubectl describe svc:\n")
     desc, _ := e2ekubectl.RunKubectl(
         ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
-    framework.Logf(desc)
+    framework.Logf("%s", desc)
 }

 // CheckSCTPModuleLoadedOnNodes checks whether any node on the list has the
@@ -297,7 +297,7 @@ var _ = SIGDescribe("LimitRange", func() {
     lrNamespace, err := f.CreateNamespace(ctx, lrName, nil)
     framework.ExpectNoError(err, "failed creating Namespace")
     framework.Logf("Namespace %q created", lrNamespace.ObjectMeta.Name)
-    framework.Logf(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name))
+    framework.Logf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name)
     _, err = f.ClientSet.CoreV1().LimitRanges(lrNamespace.ObjectMeta.Name).Create(ctx, limitRange2, metav1.CreateOptions{})
     framework.ExpectNoError(err, "Failed to create limitRange %q in %q namespace", lrName, lrNamespace.ObjectMeta.Name)
@@ -112,7 +112,7 @@ func (s *service) CreateVolume(
     }

     if hookVal, hookMsg := s.execHook("CreateVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.CreateVolumeResponse{Volume: &v}, nil

@@ -132,7 +132,7 @@ func (s *service) DeleteVolume(
     }

     if hookVal, hookMsg := s.execHook("DeleteVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     // If the volume does not exist then return an idempotent response.

@@ -150,7 +150,7 @@ func (s *service) DeleteVolume(
     klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId)

     if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }
     return &csi.DeleteVolumeResponse{}, nil
 }

@@ -179,7 +179,7 @@ func (s *service) ControllerPublishVolume(
     }

     if hookVal, hookMsg := s.execHook("ControllerPublishVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     s.volsRWL.Lock()

@@ -246,7 +246,7 @@ func (s *service) ControllerPublishVolume(
     }

     if hookVal, hookMsg := s.execHook("ControllerPublishVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.ControllerPublishVolumeResponse{

@@ -280,7 +280,7 @@ func (s *service) ControllerUnpublishVolume(
     }

     if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     s.volsRWL.Lock()

@@ -309,7 +309,7 @@ func (s *service) ControllerUnpublishVolume(
     s.vols[i] = v

     if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.ControllerUnpublishVolumeResponse{}, nil

@@ -332,7 +332,7 @@ func (s *service) ValidateVolumeCapabilities(
     }

     if hookVal, hookMsg := s.execHook("ValidateVolumeCapabilities"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.ValidateVolumeCapabilitiesResponse{
@@ -350,7 +350,7 @@ func (s *service) ControllerGetVolume(
     *csi.ControllerGetVolumeResponse, error) {

     if hookVal, hookMsg := s.execHook("GetVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     resp := &csi.ControllerGetVolumeResponse{

@@ -373,7 +373,7 @@ func (s *service) ControllerGetVolume(
     }

     if hookVal, hookMsg := s.execHook("GetVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return resp, nil

@@ -385,7 +385,7 @@ func (s *service) ListVolumes(
     *csi.ListVolumesResponse, error) {

     if hookVal, hookMsg := s.execHook("ListVolumesStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     // Copy the mock volumes into a new slice in order to avoid

@@ -464,7 +464,7 @@ func (s *service) ListVolumes(
     }

     if hookVal, hookMsg := s.execHook("ListVolumesEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.ListVolumesResponse{

@@ -479,7 +479,7 @@ func (s *service) GetCapacity(
     *csi.GetCapacityResponse, error) {

     if hookVal, hookMsg := s.execHook("GetCapacity"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.GetCapacityResponse{

@@ -493,7 +493,7 @@ func (s *service) ControllerGetCapabilities(
     *csi.ControllerGetCapabilitiesResponse, error) {

     if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     caps := []*csi.ControllerServiceCapability{

@@ -597,7 +597,7 @@ func (s *service) ControllerGetCapabilities(
     }

     if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.ControllerGetCapabilitiesResponse{

@@ -630,7 +630,7 @@ func (s *service) CreateSnapshot(ctx context.Context,
     s.snapshots.Add(snapshot)

     if hookVal, hookMsg := s.execHook("CreateSnapshotEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil
@@ -645,7 +645,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
     }

     if hookVal, hookMsg := s.execHook("DeleteSnapshotStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     // If the snapshot does not exist then return an idempotent response.

@@ -661,7 +661,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
     klog.V(5).InfoS("mock delete snapshot", "snapshotId", req.SnapshotId)

     if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.DeleteSnapshotResponse{}, nil

@@ -671,7 +671,7 @@ func (s *service) ListSnapshots(ctx context.Context,
     req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {

     if hookVal, hookMsg := s.execHook("ListSnapshots"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     // case 1: SnapshotId is not empty, return snapshots that match the snapshot id.

@@ -700,7 +700,7 @@ func (s *service) ControllerExpandVolume(
     }

     if hookVal, hookMsg := s.execHook("ControllerExpandVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     s.volsRWL.Lock()

@@ -737,7 +737,7 @@ func (s *service) ControllerExpandVolume(
     s.vols[i] = v

     if hookVal, hookMsg := s.execHook("ControllerExpandVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return resp, nil
@@ -89,7 +89,7 @@ func (s *service) NodeStageVolume(
     s.vols[i] = v

     if hookVal, hookMsg := s.execHook("NodeStageVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.NodeStageVolumeResponse{}, nil

@@ -130,7 +130,7 @@ func (s *service) NodeUnstageVolume(
     s.vols[i] = v

     if hookVal, hookMsg := s.execHook("NodeUnstageVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }
     return &csi.NodeUnstageVolumeResponse{}, nil
 }

@@ -141,7 +141,7 @@ func (s *service) NodePublishVolume(
     *csi.NodePublishVolumeResponse, error) {

     if hookVal, hookMsg := s.execHook("NodePublishVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }
     ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true"
     device, ok := req.PublishContext["device"]
@@ -229,7 +229,7 @@ func (s *service) NodePublishVolume(
         s.vols[i] = v
     }
     if hookVal, hookMsg := s.execHook("NodePublishVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.NodePublishVolumeResponse{}, nil

@@ -247,7 +247,7 @@ func (s *service) NodeUnpublishVolume(
         return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty")
     }
     if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     s.volsRWL.Lock()

@@ -282,7 +282,7 @@ func (s *service) NodeUnpublishVolume(
         s.vols[i] = v
     }
     if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return &csi.NodeUnpublishVolumeResponse{}, nil

@@ -296,7 +296,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum
         return nil, status.Error(codes.InvalidArgument, "Volume Path cannot be empty")
     }
     if hookVal, hookMsg := s.execHook("NodeExpandVolumeStart"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     s.volsRWL.Lock()

@@ -323,7 +323,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum
         s.vols[i] = v
     }
     if hookVal, hookMsg := s.execHook("NodeExpandVolumeEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     return resp, nil

@@ -335,7 +335,7 @@ func (s *service) NodeGetCapabilities(
     *csi.NodeGetCapabilitiesResponse, error) {

     if hookVal, hookMsg := s.execHook("NodeGetCapabilities"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }
     capabilities := []*csi.NodeServiceCapability{
         {

@@ -395,7 +395,7 @@ func (s *service) NodeGetCapabilities(
 func (s *service) NodeGetInfo(ctx context.Context,
     req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
     if hookVal, hookMsg := s.execHook("NodeGetInfo"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }
     csiNodeResponse := &csi.NodeGetInfoResponse{
         NodeId: s.nodeID,

@@ -442,11 +442,11 @@ func (s *service) NodeGetVolumeStats(ctx context.Context,
         msg := fmt.Sprintf("volume %q doest not exist on the specified path %q", req.VolumeId, req.VolumePath)
         resp.VolumeCondition.Abnormal = true
         resp.VolumeCondition.Message = msg
-        return resp, status.Errorf(codes.NotFound, msg)
+        return resp, status.Error(codes.NotFound, msg)
     }

     if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsEnd"); hookVal != codes.OK {
-        return nil, status.Errorf(hookVal, hookMsg)
+        return nil, status.Error(hookVal, hookMsg)
     }

     resp.Usage = []*csi.VolumeUsage{
@@ -762,7 +762,7 @@ func ensureTopologyRequirements(ctx context.Context, nodeSelection *e2epod.NodeS
     nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs)
     framework.ExpectNoError(err)
     if len(nodes.Items) < minCount {
-        e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount))
+        e2eskipper.Skipf("Number of available nodes is less than %d - skipping", minCount)
     }

     topologyKeys := driverInfo.TopologyKeys
@@ -18,6 +18,7 @@ package testsuites

 import (
     "context"
+    "errors"
     "fmt"
     "regexp"
     "strings"
@@ -314,7 +315,7 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
         cleanupErrors = append(cleanupErrors, fmt.Sprintf("timed out waiting for PVs to be deleted: %s", err))
     }
     if len(cleanupErrors) != 0 {
-        return fmt.Errorf("test cleanup failed: " + strings.Join(cleanupErrors, "; "))
+        return errors.New("test cleanup failed: " + strings.Join(cleanupErrors, "; "))
     }
     return nil
 }
@@ -550,7 +550,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
     // The claim should timeout phase:Pending
     err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
     gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
-    framework.Logf(err.Error())
+    framework.Logf("%s", err.Error())
     claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
     framework.ExpectNoError(err)
     gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))

@@ -589,7 +589,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
     // The claim should timeout phase:Pending
     err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
     gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
-    framework.Logf(err.Error())
+    framework.Logf("%s", err.Error())
     claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
     framework.ExpectNoError(err)
     gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
@@ -19,6 +19,7 @@ package apps
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net"
@@ -131,13 +132,14 @@ func (t *CassandraUpgradeTest) listUsers() ([]string, error) {
         if err != nil {
             return nil, err
         }
-        return nil, fmt.Errorf(string(b))
+        return nil, errors.New(string(b))
     }
     var names []string
     if err := json.NewDecoder(r.Body).Decode(&names); err != nil {
         return nil, err
     }
     return names, nil

 }

 // addUser adds a user to the db via the tester services.

@@ -153,7 +155,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
         if err != nil {
             return err
         }
-        return fmt.Errorf(string(b))
+        return errors.New(string(b))
     }
     return nil
 }
@@ -19,6 +19,7 @@ package apps
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net"

@@ -125,7 +126,7 @@ func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
         if err != nil {
             return nil, err
         }
-        return nil, fmt.Errorf(string(b))
+        return nil, errors.New(string(b))
     }
     var names []string
     if err := json.NewDecoder(r.Body).Decode(&names); err != nil {

@@ -146,7 +147,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
         if err != nil {
             return err
         }
-        return fmt.Errorf(string(b))
+        return errors.New(string(b))
     }
     return nil
 }
@@ -19,6 +19,7 @@ package apps
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net"

@@ -194,7 +195,7 @@ func (t *MySQLUpgradeTest) addName(name string) error {
         if err != nil {
             return err
         }
-        return fmt.Errorf(string(b))
+        return errors.New(string(b))
     }
     return nil
 }

@@ -212,7 +213,7 @@ func (t *MySQLUpgradeTest) countNames() (int, error) {
         if err != nil {
             return 0, err
         }
-        return 0, fmt.Errorf(string(b))
+        return 0, errors.New(string(b))
     }
     var count int
     if err := json.NewDecoder(r.Body).Decode(&count); err != nil {
@@ -112,19 +112,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
     ginkgo.By("deploying the GMSA webhook")
     err := deployGmsaWebhook(ctx, f)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating the GMSA custom resource")
     err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
     rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating a service account")

@@ -179,19 +179,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
     ginkgo.By("deploying the GMSA webhook")
     err := deployGmsaWebhook(ctx, f)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating the GMSA custom resource")
     err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
     rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Fail(err.Error())
     }

     ginkgo.By("creating a service account")
@@ -18,6 +18,7 @@ package e2enode

 import (
     "context"
+    "errors"
     "fmt"
     "os"
     "time"

@@ -90,8 +91,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit
             return nil
         }
         msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
-        framework.Logf(msg)
-        return fmt.Errorf(msg)
+        return errors.New(msg)
     }, time.Minute*2, time.Second*4).Should(gomega.Succeed())

     ginkgo.By("check if it's running all the time")

@@ -100,7 +100,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit
         if err == nil {
             framework.Logf("mirror pod %q is running", mirrorPodName)
         } else {
-            framework.Logf(err.Error())
+            framework.Logf("%s", err.Error())
         }
         return err
     }, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())
@@ -42,8 +42,8 @@ func requireSRIOVDevices() {

     msg := "this test is meant to run on a system with at least one configured VF from SRIOV device"
     if framework.TestContext.RequireDevices {
-        framework.Failf(msg)
+        framework.Fail(msg)
     } else {
-        e2eskipper.Skipf(msg)
+        e2eskipper.Skip(msg)
     }
 }
@@ -127,7 +127,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
     serializer := getInputSerializer(contentType)
     if serializer == nil {
         msg := fmt.Sprintf("invalid Content-Type header `%s`", contentType)
-        klog.Errorf(msg)
+        klog.Error(msg)
         http.Error(w, msg, http.StatusBadRequest)
         return
     }

@@ -147,7 +147,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
     convertReview, ok := obj.(*v1beta1.ConversionReview)
     if !ok {
         msg := fmt.Sprintf("Expected v1beta1.ConversionReview but got: %T", obj)
-        klog.Errorf(msg)
+        klog.Error(msg)
         http.Error(w, msg, http.StatusBadRequest)
         return
     }

@@ -161,7 +161,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
     convertReview, ok := obj.(*v1.ConversionReview)
     if !ok {
         msg := fmt.Sprintf("Expected v1.ConversionReview but got: %T", obj)
-        klog.Errorf(msg)
+        klog.Error(msg)
         http.Error(w, msg, http.StatusBadRequest)
         return
     }

@@ -182,7 +182,7 @@ func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
     outSerializer := getOutputSerializer(accept)
     if outSerializer == nil {
         msg := fmt.Sprintf("invalid accept header `%s`", accept)
-        klog.Errorf(msg)
+        klog.Error(msg)
         http.Error(w, msg, http.StatusBadRequest)
         return
     }
@@ -178,5 +178,5 @@ func handleRunRequest(w http.ResponseWriter, r *http.Request) {
         return
     }

-    fmt.Fprintf(w, "ok\noutput:\n\n"+output.b.String())
+    fmt.Fprint(w, "ok\noutput:\n\n"+output.b.String())
 }

@@ -285,7 +285,7 @@ func echoHandler(w http.ResponseWriter, r *http.Request) {

 func clientIPHandler(w http.ResponseWriter, r *http.Request) {
     log.Printf("GET /clientip")
-    fmt.Fprintf(w, r.RemoteAddr)
+    fmt.Fprint(w, r.RemoteAddr)
 }
 func headerHandler(w http.ResponseWriter, r *http.Request) {
     key := r.FormValue("key")
@@ -345,7 +345,7 @@ func (c *metricDecoder) decodeOpts(expr ast.Expr) (metric, error) {
     var err error
     s, err := c.decodeString(kv.Value)
     if err != nil {
-        return m, newDecodeErrorf(expr, err.Error())
+        return m, newDecodeErrorf(expr, "%s", err.Error())
     }
     value = *s
     switch key {

@@ -771,7 +771,7 @@ func (c *metricDecoder) decodeBuildFQNameArguments(fc *ast.CallExpr) (string, st
     for i, elt := range fc.Args {
         s, err := c.decodeString(elt)
         if err != nil || s == nil {
-            return "", "", "", newDecodeErrorf(fc, err.Error())
+            return "", "", "", newDecodeErrorf(fc, "%s", err.Error())
         }
         strArgs[i] = *s
     }
@@ -18,6 +18,7 @@ package dualstack

 import (
     "encoding/json"
+    "errors"
     "fmt"
     "reflect"
     "strings"

@@ -1384,7 +1385,7 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1.

     if len(errstrings) > 0 {
         errstrings = append(errstrings, fmt.Sprintf("Error validating Service: %s, ClusterIPs: %v Expected IPFamilies %v", svc.Name, svc.Spec.ClusterIPs, expectedIPFamilies))
-        return fmt.Errorf(strings.Join(errstrings, "\n"))
+        return errors.New(strings.Join(errstrings, "\n"))
     }

     return nil
@@ -158,7 +158,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
             conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
     }
     if !silent {
-        klog.Infof(msg)
+        klog.Info(msg)
     }
     return false
 }
@@ -697,7 +697,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
     wg.Wait()
     close(errs)
     for errString := range errs {
-        t.Fatalf(errString)
+        t.Fatal(errString)
     }
     t.Logf("all pods are created, all replications controllers are created then deleted")
     // wait for the RCs and Pods to reach the expected numbers.
@@ -68,7 +68,7 @@ func TestServiceAllocation(t *testing.T) {
         },
     }
     for _, tc := range testcases {
-        t.Run(fmt.Sprintf(tc.name), func(t *testing.T) {
+        t.Run(tc.name, func(t *testing.T) {
             etcdOptions := framework.SharedEtcd()
             apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
             s1 := kubeapiservertesting.StartTestServerOrDie(t,
@@ -18,6 +18,7 @@ package utils

 import (
     "context"
+    "errors"
     "fmt"
     "strings"
     "time"

@@ -99,7 +100,7 @@ func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []str
     }
     for _, labelKey := range labelKeys {
         if node.Labels != nil && len(node.Labels[labelKey]) != 0 {
-            return fmt.Errorf("Failed removing label " + labelKey + " of the node " + nodeName)
+            return errors.New("Failed removing label " + labelKey + " of the node " + nodeName)
         }
     }
     return nil
@@ -18,6 +18,7 @@ package utils

 import (
     "context"
+    "errors"
     "fmt"
     "time"

@@ -226,7 +227,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
     })
     if wait.Interrupted(err) {
         LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
-        err = fmt.Errorf(reason)
+        err = errors.New(reason)
     }
     if newRS == nil {
         return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
@@ -664,7 +664,7 @@ func (config *RCConfig) start(ctx context.Context) error {
         *config.CreatedPods = startupStatus.Created
     }
     if !config.Silent {
-        config.RCConfigLog(startupStatus.String(config.Name))
+        config.RCConfigLog("%s", startupStatus.String(config.Name))
     }

     if config.PodStatusFile != nil {

@@ -688,8 +688,8 @@ func (config *RCConfig) start(ctx context.Context) error {
     if podDeletionsCount > config.MaxAllowedPodDeletions {
         // Number of pods which disappeared is over threshold
         err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
-        config.RCConfigLog(err.Error())
-        config.RCConfigLog(diff.String(sets.NewString()))
+        config.RCConfigLog("%s", err.Error())
+        config.RCConfigLog("%s", diff.String(sets.NewString()))
         return err
     }