diff --git a/pkg/kubelet/active_deadline.go b/pkg/kubelet/active_deadline.go index f14024b986c..3b92b54c6a1 100644 --- a/pkg/kubelet/active_deadline.go +++ b/pkg/kubelet/active_deadline.go @@ -51,7 +51,7 @@ func newActiveDeadlineHandler( // check for all required fields if clock == nil || podStatusProvider == nil || recorder == nil { - return nil, fmt.Errorf("Required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder) + return nil, fmt.Errorf("required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder) } return &activeDeadlineHandler{ clock: clock, diff --git a/pkg/kubelet/apis/podresources/client.go b/pkg/kubelet/apis/podresources/client.go index cb3ca222ed3..ba6eb241f1a 100644 --- a/pkg/kubelet/apis/podresources/client.go +++ b/pkg/kubelet/apis/podresources/client.go @@ -38,7 +38,7 @@ func GetClient(socket string, connectionTimeout time.Duration, maxMsgSize int) ( conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { - return nil, nil, fmt.Errorf("Error dialing socket %s: %v", socket, err) + return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err) } return podresourcesapi.NewPodResourcesListerClient(conn), conn, nil } diff --git a/pkg/kubelet/certificate/bootstrap/bootstrap.go b/pkg/kubelet/certificate/bootstrap/bootstrap.go index 8aef15c5212..9449ae12a6c 100644 --- a/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -235,32 +235,32 @@ func isClientConfigStillValid(kubeconfigPath string) (bool, error) { } bootstrapClientConfig, err := loadRESTClientConfig(kubeconfigPath) if err != nil { - utilruntime.HandleError(fmt.Errorf("Unable to read existing bootstrap client config: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to read existing bootstrap client config: %v", err)) return false, nil } transportConfig, err := 
bootstrapClientConfig.TransportConfig() if err != nil { - utilruntime.HandleError(fmt.Errorf("Unable to load transport configuration from existing bootstrap client config: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to load transport configuration from existing bootstrap client config: %v", err)) return false, nil } // has side effect of populating transport config data fields if _, err := transport.TLSConfigFor(transportConfig); err != nil { - utilruntime.HandleError(fmt.Errorf("Unable to load TLS configuration from existing bootstrap client config: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to load TLS configuration from existing bootstrap client config: %v", err)) return false, nil } certs, err := certutil.ParseCertsPEM(transportConfig.TLS.CertData) if err != nil { - utilruntime.HandleError(fmt.Errorf("Unable to load TLS certificates from existing bootstrap client config: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to load TLS certificates from existing bootstrap client config: %v", err)) return false, nil } if len(certs) == 0 { - utilruntime.HandleError(fmt.Errorf("Unable to read TLS certificates from existing bootstrap client config: %v", err)) + utilruntime.HandleError(fmt.Errorf("unable to read TLS certificates from existing bootstrap client config: %v", err)) return false, nil } now := time.Now() for _, cert := range certs { if now.After(cert.NotAfter) { - utilruntime.HandleError(fmt.Errorf("Part of the existing bootstrap client certificate is expired: %s", cert.NotAfter)) + utilruntime.HandleError(fmt.Errorf("part of the existing bootstrap client certificate is expired: %s", cert.NotAfter)) return false, nil } } diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index 39aad1fb38d..a653e67c790 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -303,7 +303,7 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error { 
// Delete cgroups using libcontainers Managers Destroy() method if err = manager.Destroy(); err != nil { - return fmt.Errorf("Unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, err) + return fmt.Errorf("unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, err) } return nil @@ -346,14 +346,14 @@ func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error { for sys, required := range getSupportedSubsystems() { if _, ok := cgroupConfig.Paths[sys.Name()]; !ok { if required { - return fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) + return fmt.Errorf("failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil { - return fmt.Errorf("Failed to set config for supported subsystems : %v", err) + return fmt.Errorf("failed to set config for supported subsystems : %v", err) } } return nil @@ -560,14 +560,14 @@ func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercg for sys, required := range getSupportedSubsystems() { if _, ok := cgroupPaths[sys.Name()]; !ok { if required { - return nil, fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) + return nil, fmt.Errorf("failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... 
klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil { - return nil, fmt.Errorf("Failed to get stats for supported subsystems : %v", err) + return nil, fmt.Errorf("failed to get stats for supported subsystems : %v", err) } } return stats, nil diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 5a645a07f5c..da651b7842d 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -214,7 +214,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I // If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should // error out unless --fail-swap-on is set to false. if len(swapLines) > 1 { - return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines) + return nil, fmt.Errorf("running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. 
/proc/swaps contained: %v", swapLines) } } @@ -395,7 +395,7 @@ func setupKernelTunables(option KernelTunableBehavior) error { switch option { case KernelTunableError: - errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)) + errList = append(errList, fmt.Errorf("invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)) case KernelTunableWarn: klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) case KernelTunableModify: diff --git a/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go index badafab43d8..b9fe0f46374 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ b/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go @@ -51,6 +51,7 @@ func NewCheckpointState(stateDir, checkpointName, policyName string) (State, err } if err := stateCheckpoint.restoreState(); err != nil { + //lint:ignore ST1005 user-facing error message return nil, fmt.Errorf("could not restore state from checkpoint: %v\n"+ "Please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet.", err, path.Join(stateDir, checkpointName)) diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index a78dbd6d3b3..00ac669dffc 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -276,12 +276,12 @@ func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, version e, err := newEndpointImpl(endpoint, pluginName, m.callback) if err != nil { - return fmt.Errorf("Failed to dial device plugin with socketPath %s: %v", endpoint, err) + return fmt.Errorf("failed to dial device plugin with socketPath %s: %v", endpoint, err) } options, err := e.client.GetDevicePluginOptions(context.Background(), &pluginapi.Empty{}) if err != nil { - return fmt.Errorf("Failed to 
get device plugin options: %v", err) + return fmt.Errorf("failed to get device plugin options: %v", err) } m.registerEndpoint(pluginName, options, e) @@ -697,7 +697,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont m.mutex.Lock() m.allocatedDevices = m.podDevices.devices() m.mutex.Unlock() - return fmt.Errorf("Unknown Device Plugin %s", resource) + return fmt.Errorf("unknown Device Plugin %s", resource) } devs := allocDevices.UnsortedList() @@ -717,7 +717,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont } if len(resp.ContainerResponses) == 0 { - return fmt.Errorf("No containers return in allocation response %v", resp) + return fmt.Errorf("no containers return in allocation response %v", resp) } // Update internal cached podDevices state. diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go index 84370f4aa56..8b8fada15c0 100644 --- a/pkg/kubelet/cm/node_container_manager_linux.go +++ b/pkg/kubelet/cm/node_container_manager_linux.go @@ -237,7 +237,7 @@ func (cm *containerManagerImpl) validateNodeAllocatable() error { } if len(errors) > 0 { - return fmt.Errorf("Invalid Node Allocatable configuration. %s", strings.Join(errors, " ")) + return fmt.Errorf("invalid Node Allocatable configuration. 
%s", strings.Join(errors, " ")) } return nil } diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index d0e3727e233..42f51e91c31 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -138,6 +138,6 @@ func (s *sourceURL) extractFromURL() error { } return fmt.Errorf("%v: received '%v', but couldn't parse as "+ - "single (%v) or multiple pods (%v).\n", + "single (%v) or multiple pods (%v)", s.url, string(data), singlePodErr, multiPodErr) } diff --git a/pkg/kubelet/container/sync_result.go b/pkg/kubelet/container/sync_result.go index 0d456330307..ef1b2a97d5e 100644 --- a/pkg/kubelet/container/sync_result.go +++ b/pkg/kubelet/container/sync_result.go @@ -116,11 +116,11 @@ func (p *PodSyncResult) Fail(err error) { func (p *PodSyncResult) Error() error { errlist := []error{} if p.SyncError != nil { - errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v\n", p.SyncError)) + errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v", p.SyncError)) } for _, result := range p.SyncResults { if result.Error != nil { - errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q\n", result.Action, result.Target, + errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q", result.Action, result.Target, result.Error, result.Message)) } } diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go index 16cd9fda9b8..ce40e9ff5b0 100644 --- a/pkg/kubelet/dockershim/docker_service.go +++ b/pkg/kubelet/dockershim/docker_service.go @@ -527,7 +527,7 @@ func (ds *dockerService) getDockerVersionFromCache() (*dockertypes.Version, erro } dv, ok := value.(*dockertypes.Version) if !ok { - return nil, fmt.Errorf("Converted to *dockertype.Version error") + return nil, fmt.Errorf("converted to *dockertype.Version error") } return dv, nil } diff --git a/pkg/kubelet/dockershim/docker_streaming_others.go b/pkg/kubelet/dockershim/docker_streaming_others.go index 60a5dc6b8f3..75570086894 100644 
--- a/pkg/kubelet/dockershim/docker_streaming_others.go +++ b/pkg/kubelet/dockershim/docker_streaming_others.go @@ -41,14 +41,14 @@ func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream i containerPid := container.State.Pid socatPath, lookupErr := exec.LookPath("socat") if lookupErr != nil { - return fmt.Errorf("unable to do port forwarding: socat not found.") + return fmt.Errorf("unable to do port forwarding: socat not found") } args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)} nsenterPath, lookupErr := exec.LookPath("nsenter") if lookupErr != nil { - return fmt.Errorf("unable to do port forwarding: nsenter not found.") + return fmt.Errorf("unable to do port forwarding: nsenter not found") } commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " ")) diff --git a/pkg/kubelet/dockershim/network/cni/cni_others.go b/pkg/kubelet/dockershim/network/cni/cni_others.go index f6f2ccd6595..6a75ef1e3cd 100644 --- a/pkg/kubelet/dockershim/network/cni/cni_others.go +++ b/pkg/kubelet/dockershim/network/cni/cni_others.go @@ -66,7 +66,7 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) } if netnsPath == "" { - return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id) + return nil, fmt.Errorf("cannot find the network namespace, skipping pod network status for container %q", id) } ips, err := network.GetPodIPs(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName) diff --git a/pkg/kubelet/dockershim/network/hairpin/hairpin.go b/pkg/kubelet/dockershim/network/hairpin/hairpin.go index 262e78460df..3faefd1b86d 100644 --- a/pkg/kubelet/dockershim/network/hairpin/hairpin.go +++ b/pkg/kubelet/dockershim/network/hairpin/hairpin.go @@ -53,12 +53,12 @@ func findPairInterfaceOfContainerInterface(e 
exec.Interface, containerInterfaceN nsenterArgs = append(nsenterArgs, "-F", "--", ethtoolPath, "--statistics", containerInterfaceName) output, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput() if err != nil { - return "", fmt.Errorf("Unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output)) + return "", fmt.Errorf("unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output)) } // look for peer_ifindex match := ethtoolOutputRegex.FindSubmatch(output) if match == nil { - return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc) + return "", fmt.Errorf("no peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc) } peerIfIndex, err := strconv.Atoi(string(match[1])) if err != nil { // seems impossible (\d+ not numeric) diff --git a/pkg/kubelet/dockershim/network/hostport/fake_iptables.go b/pkg/kubelet/dockershim/network/hostport/fake_iptables.go index ab28a1d8747..e068e51b6a8 100644 --- a/pkg/kubelet/dockershim/network/hostport/fake_iptables.go +++ b/pkg/kubelet/dockershim/network/hostport/fake_iptables.go @@ -59,7 +59,7 @@ func (f *fakeIPTables) GetVersion() (string, error) { func (f *fakeIPTables) getTable(tableName utiliptables.Table) (*fakeTable, error) { table, ok := f.tables[string(tableName)] if !ok { - return nil, fmt.Errorf("Table %s does not exist", tableName) + return nil, fmt.Errorf("table %s does not exist", tableName) } return table, nil } @@ -72,7 +72,7 @@ func (f *fakeIPTables) getChain(tableName utiliptables.Table, chainName utilipta chain, ok := table.chains[string(chainName)] if !ok { - return table, nil, fmt.Errorf("Chain %s/%s does not exist", tableName, chainName) + return table, nil, fmt.Errorf("chain %s/%s does not exist", tableName, chainName) } return table, chain, nil @@ -152,7 +152,7 @@ func (f *fakeIPTables) 
ensureRule(position utiliptables.RulePosition, tableName } else if position == utiliptables.Append { chain.rules = append(chain.rules, rule) } else { - return false, fmt.Errorf("Unknown position argument %q", position) + return false, fmt.Errorf("unknown position argument %q", position) } return false, nil @@ -171,7 +171,7 @@ func normalizeRule(rule string) (string, error) { if remaining[0] == '"' { end = strings.Index(remaining[1:], "\"") if end < 0 { - return "", fmt.Errorf("Invalid rule syntax: mismatched quotes") + return "", fmt.Errorf("invalid rule syntax: mismatched quotes") } end += 2 } else { @@ -292,7 +292,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } else if strings.HasPrefix(line, "-A") { parts := strings.Split(line, " ") if len(parts) < 3 { - return fmt.Errorf("Invalid iptables rule '%s'", line) + return fmt.Errorf("invalid iptables rule '%s'", line) } chainName := utiliptables.Chain(parts[1]) rule := strings.TrimPrefix(line, fmt.Sprintf("-A %s ", chainName)) @@ -303,7 +303,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } else if strings.HasPrefix(line, "-I") { parts := strings.Split(line, " ") if len(parts) < 3 { - return fmt.Errorf("Invalid iptables rule '%s'", line) + return fmt.Errorf("invalid iptables rule '%s'", line) } chainName := utiliptables.Chain(parts[1]) rule := strings.TrimPrefix(line, fmt.Sprintf("-I %s ", chainName)) @@ -314,7 +314,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } else if strings.HasPrefix(line, "-X") { parts := strings.Split(line, " ") if len(parts) < 2 { - return fmt.Errorf("Invalid iptables rule '%s'", line) + return fmt.Errorf("invalid iptables rule '%s'", line) } if err := f.DeleteChain(tableName, utiliptables.Chain(parts[1])); err != nil { return err diff --git a/pkg/kubelet/dockershim/network/hostport/hostport.go b/pkg/kubelet/dockershim/network/hostport/hostport.go index 95339cdea81..f5f12fe3a83 
100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport.go @@ -114,7 +114,7 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName klog.V(4).Info("Ensuring kubelet hostport chains") // Ensure kubeHostportChain if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil { - return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err) + return fmt.Errorf("failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err) } tableChainsNeedJumpServices := []struct { table utiliptables.Table @@ -131,14 +131,14 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName // This ensures KUBE-SERVICES chain gets processed first. // Since rules in KUBE-HOSTPORTS chain matches broader cases, allow the more specific rules to be processed first. if _, err := iptables.EnsureRule(utiliptables.Append, tc.table, tc.chain, args...); err != nil { - return fmt.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err) + return fmt.Errorf("failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err) } } if natInterfaceName != "" && natInterfaceName != "lo" { // Need to SNAT traffic from localhost args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"} if _, err := iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { - return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err) + return fmt.Errorf("failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err) } } return nil diff --git 
a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go index 11482a50774..f3936bfbb60 100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go @@ -249,7 +249,7 @@ func (hm *hostportManager) syncIPTables(lines []byte) error { klog.V(3).Infof("Restoring iptables rules: %s", lines) err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - return fmt.Errorf("Failed to execute iptables-restore: %v", err) + return fmt.Errorf("failed to execute iptables-restore: %v", err) } return nil } diff --git a/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go b/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go index 5274f890dff..002a0f441d3 100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go @@ -123,7 +123,7 @@ func gatherAllHostports(activePodPortMappings []*PodPortMapping) (map[*PortMappi podHostportMap := make(map[*PortMapping]targetPod) for _, pm := range activePodPortMappings { if pm.IP.To4() == nil { - return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm)) + return nil, fmt.Errorf("invalid or missing pod %s IP", getPodFullName(pm)) } // should not handle hostports for hostnetwork pods if pm.HostNetwork { @@ -286,7 +286,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap klog.V(3).Infof("Restoring iptables rules: %s", natLines) err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - return fmt.Errorf("Failed to execute iptables-restore: %v", err) + return fmt.Errorf("failed to execute iptables-restore: %v", err) } h.cleanupHostportMap(hostportPodMap) diff --git a/pkg/kubelet/dockershim/network/hostport/testing/fake.go 
b/pkg/kubelet/dockershim/network/hostport/testing/fake.go index 1d14c02ed90..3bde65cc2ed 100644 --- a/pkg/kubelet/dockershim/network/hostport/testing/fake.go +++ b/pkg/kubelet/dockershim/network/hostport/testing/fake.go @@ -35,7 +35,7 @@ func (h *fakeSyncer) OpenPodHostportsAndSync(newPortMapping *hostport.PodPortMap func (h *fakeSyncer) SyncHostports(natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error { for _, r := range activePortMapping { if r.IP.To4() == nil { - return fmt.Errorf("Invalid or missing pod %s/%s IP", r.Namespace, r.Name) + return fmt.Errorf("invalid or missing pod %s/%s IP", r.Namespace, r.Name) } } diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 556b074775f..a1402f5b5b3 100644 --- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -180,12 +180,12 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc "type": "loopback" }`)) if err != nil { - return fmt.Errorf("Failed to generate loopback config: %v", err) + return fmt.Errorf("failed to generate loopback config: %v", err) } plugin.nsenterPath, err = plugin.execer.LookPath("nsenter") if err != nil { - return fmt.Errorf("Failed to find nsenter binary: %v", err) + return fmt.Errorf("failed to find nsenter binary: %v", err) } // Need to SNAT outbound traffic from cluster @@ -209,7 +209,7 @@ func (plugin *kubenetNetworkPlugin) ensureMasqRule() error { "-m", "addrtype", "!", "--dst-type", "LOCAL", "!", "-d", plugin.nonMasqueradeCIDR, "-j", "MASQUERADE"); err != nil { - return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err) + return fmt.Errorf("failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err) } } return nil @@ -381,7 +381,7 @@ func (plugin 
*kubenetNetworkPlugin) setup(namespace string, name string, id kube // promiscuous mode is not on, then turn it on. err := netlink.SetPromiscOn(link) if err != nil { - return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err) + return fmt.Errorf("error setting promiscuous mode on %s: %v", BridgeName, err) } } @@ -410,7 +410,7 @@ func (plugin *kubenetNetworkPlugin) addTrafficShaping(id kubecontainer.Container shaper := plugin.shaper() ingress, egress, err := bandwidth.ExtractPodBandwidthResources(annotations) if err != nil { - return fmt.Errorf("Error reading pod bandwidth annotations: %v", err) + return fmt.Errorf("error reading pod bandwidth annotations: %v", err) } iplist, exists := plugin.getCachedPodIPs(id) if !exists { @@ -428,7 +428,7 @@ func (plugin *kubenetNetworkPlugin) addTrafficShaping(id kubecontainer.Container } if err := shaper.ReconcileCIDR(fmt.Sprintf("%v/%v", ip, mask), egress, ingress); err != nil { - return fmt.Errorf("Failed to add pod to shaper: %v", err) + return fmt.Errorf("failed to add pod to shaper: %v", err) } } } @@ -471,7 +471,7 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k start := time.Now() if err := plugin.Status(); err != nil { - return fmt.Errorf("Kubenet cannot SetUpPod: %v", err) + return fmt.Errorf("kubenet cannot SetUpPod: %v", err) } defer func() { @@ -550,7 +550,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i }() if plugin.netConfig == nil { - return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods") + return fmt.Errorf("kubenet needs a PodCIDR to tear down pods") } if err := plugin.teardown(namespace, name, id); err != nil { @@ -576,10 +576,10 @@ func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name s // not a cached version, get via network ns netnsPath, err := plugin.host.GetNetNS(id.ID) if err != nil { - return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err) + return 
nil, fmt.Errorf("kubenet failed to retrieve network namespace path: %v", err) } if netnsPath == "" { - return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id) + return nil, fmt.Errorf("cannot find the network namespace, skipping pod network status for container %q", id) } ips, err := network.GetPodIPs(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName) if err != nil { @@ -626,7 +626,7 @@ func (plugin *kubenetNetworkPlugin) getNetworkStatus(id kubecontainer.ContainerI func (plugin *kubenetNetworkPlugin) Status() error { // Can't set up pods if we don't have a PodCIDR yet if plugin.netConfig == nil { - return fmt.Errorf("Kubenet does not have netConfig. This is most likely due to lack of PodCIDR") + return fmt.Errorf("kubenet does not have netConfig. This is most likely due to lack of PodCIDR") } if !plugin.checkRequiredCNIPlugins() { @@ -683,14 +683,14 @@ func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubeco func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) (cnitypes.Result, error) { rt, err := plugin.buildCNIRuntimeConf(ifName, id, true) if err != nil { - return nil, fmt.Errorf("Error building CNI config: %v", err) + return nil, fmt.Errorf("error building CNI config: %v", err) } klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) res, err := plugin.cniConfig.AddNetwork(context.TODO(), config, rt) if err != nil { - return nil, fmt.Errorf("Error adding container to network: %v", err) + return nil, fmt.Errorf("error adding container to network: %v", err) } return res, nil } @@ -698,7 +698,7 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.NetworkConfig, ifName, namespace, 
name string, id kubecontainer.ContainerID) error { rt, err := plugin.buildCNIRuntimeConf(ifName, id, false) if err != nil { - return fmt.Errorf("Error building CNI config: %v", err) + return fmt.Errorf("error building CNI config: %v", err) } klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) @@ -706,7 +706,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. if err != nil && !strings.Contains(err.Error(), "no such file or directory") { - return fmt.Errorf("Error removing container from network: %v", err) + return fmt.Errorf("error removing container from network: %v", err) } return nil } @@ -791,20 +791,20 @@ func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.Contain sysctlBin, err := plugin.execer.LookPath("sysctl") if err != nil { - return fmt.Errorf("Could not find sysctl binary: %s", err) + return fmt.Errorf("could not find sysctl binary: %s", err) } netnsPath, err := plugin.host.GetNetNS(id.ID) if err != nil { - return fmt.Errorf("Failed to get netns: %v", err) + return fmt.Errorf("failed to get netns: %v", err) } if netnsPath == "" { - return fmt.Errorf("Pod has no network namespace") + return fmt.Errorf("pod has no network namespace") } // If the sysctl doesn't exist, it means ipv6 is disabled; log and move on if _, err := plugin.sysctl.GetSysctl(key); err != nil { - return fmt.Errorf("Ipv6 not enabled: %v", err) + return fmt.Errorf("ipv6 not enabled: %v", err) } output, err := plugin.execer.Command(plugin.nsenterPath, @@ -812,7 +812,7 @@ func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.Contain sysctlBin, "-w", fmt.Sprintf("%s=%s", key, "0"), ).CombinedOutput() if err != nil { - return fmt.Errorf("Failed to write 
sysctl: output: %s error: %s", + return fmt.Errorf("failed to write sysctl: output: %s error: %s", output, err) } return nil diff --git a/pkg/kubelet/dockershim/network/plugins.go b/pkg/kubelet/dockershim/network/plugins.go index 18d921166dc..4240f67611a 100644 --- a/pkg/kubelet/dockershim/network/plugins.go +++ b/pkg/kubelet/dockershim/network/plugins.go @@ -161,12 +161,12 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H if chosenPlugin != nil { err := chosenPlugin.Init(host, hairpinMode, nonMasqueradeCIDR, mtu) if err != nil { - allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) + allErrs = append(allErrs, fmt.Errorf("network plugin %q failed init: %v", networkPluginName, err)) } else { klog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { - allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) + allErrs = append(allErrs, fmt.Errorf("network plugin %q not found", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) @@ -235,16 +235,16 @@ func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceNam output, err := execer.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--", "ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput() if err != nil { - return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err) + return nil, fmt.Errorf("unexpected command output %s with error: %v", output, err) } lines := strings.Split(string(output), "\n") if len(lines) < 1 { - return nil, fmt.Errorf("Unexpected command output %s", output) + return nil, fmt.Errorf("unexpected command output %s", output) } fields := strings.Fields(lines[0]) if len(fields) < 4 { - return nil, fmt.Errorf("Unexpected address output %s ", lines[0]) + return nil, fmt.Errorf("unexpected address output %s ", lines[0]) } ip, _, err := net.ParseCIDR(fields[3]) 
if err != nil { @@ -390,7 +390,7 @@ func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id ku netStatus, err := pm.plugin.GetPodNetworkStatus(podNamespace, podName, id) if err != nil { - return nil, fmt.Errorf("NetworkPlugin %s failed on the status hook for pod %q: %v", pm.plugin.Name(), fullPodName, err) + return nil, fmt.Errorf("networkPlugin %s failed on the status hook for pod %q: %v", pm.plugin.Name(), fullPodName, err) } return netStatus, nil @@ -404,7 +404,7 @@ func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer klog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName) if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations, options); err != nil { - return fmt.Errorf("NetworkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err) + return fmt.Errorf("networkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err) } return nil @@ -418,7 +418,7 @@ func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontai klog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName) if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil { - return fmt.Errorf("NetworkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err) + return fmt.Errorf("networkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err) } return nil diff --git a/pkg/kubelet/dockershim/security_context.go b/pkg/kubelet/dockershim/security_context.go index 7a788b0df80..d1ffbf78ea8 100644 --- a/pkg/kubelet/dockershim/security_context.go +++ b/pkg/kubelet/dockershim/security_context.go @@ -90,7 +90,7 @@ func modifyContainerConfig(sc *runtimeapi.LinuxContainerSecurityContext, config user := config.User if sc.RunAsGroup != nil { if user == "" { - return fmt.Errorf("runAsGroup is specified without a runAsUser.") + return 
fmt.Errorf("runAsGroup is specified without a runAsUser") } user = fmt.Sprintf("%s:%d", config.User, sc.GetRunAsGroup().Value) } diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index ef74e46e72d..92ec4a9f6a4 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -287,7 +287,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err podVolDir := kl.getPodVolumesDir(podUID) if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil { - return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) + return volumes, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr) } else if !pathExists { klog.Warningf("Path %q does not exist", podVolDir) return volumes, nil @@ -303,7 +303,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err volumePluginPath := filepath.Join(podVolDir, volumePluginName) volumeDirs, err := utilpath.ReadDirNoStat(volumePluginPath) if err != nil { - return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err) + return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err) } for _, volumeDir := range volumeDirs { volumes = append(volumes, filepath.Join(volumePluginPath, volumeDir)) @@ -336,7 +336,7 @@ func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) { podVolDir := kl.getPodVolumeSubpathsDir(podUID) if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil { - return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) + return true, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr) } else if !pathExists { return false, nil } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 46e0595a751..7e6fa2f12bc 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -594,7 +594,7 @@ func 
validateNodeIP(nodeIP net.IP) error { return nil } } - return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String()) + return fmt.Errorf("node IP: %q not found in the host's network interfaces", nodeIP.String()) } // nodeStatusHasChanged compares the original node and current node's status and diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index f63d1e02572..29a2b6b4db9 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -403,7 +403,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, er hostname := pod.Name if len(pod.Spec.Hostname) > 0 { if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 { - return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";")) + return "", "", fmt.Errorf("pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";")) } hostname = pod.Spec.Hostname } @@ -416,7 +416,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, er hostDomain := "" if len(pod.Spec.Subdomain) > 0 { if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 { - return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";")) + return "", "", fmt.Errorf("pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";")) } hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain) } @@ -581,7 +581,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container configMap, ok := configMaps[name] if !ok { if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) + return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) } optional := cm.Optional != nil && 
*cm.Optional configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name) @@ -616,7 +616,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container secret, ok := secrets[name] if !ok { if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) + return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) } optional := s.Optional != nil && *s.Optional secret, err = kl.secretManager.GetSecret(pod.Namespace, name) @@ -690,7 +690,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container configMap, ok := configMaps[name] if !ok { if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) + return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name) } configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name) if err != nil { @@ -707,7 +707,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container if optional { continue } - return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name) + return result, fmt.Errorf("couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name) } case envVar.ValueFrom.SecretKeyRef != nil: s := envVar.ValueFrom.SecretKeyRef @@ -717,7 +717,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container secret, ok := secrets[name] if !ok { if kl.kubeClient == nil { - return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) + return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name) } secret, err = kl.secretManager.GetSecret(pod.Namespace, name) if err != nil { @@ -734,7 +734,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container if optional { continue } - 
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name) + return result, fmt.Errorf("couldn't find key %v in Secret %v/%v", key, pod.Namespace, name) } runtimeVal = string(runtimeValBytes) } diff --git a/pkg/kubelet/kubelet_volumes.go b/pkg/kubelet/kubelet_volumes.go index 88b34c5efd1..fd12a66c279 100644 --- a/pkg/kubelet/kubelet_volumes.go +++ b/pkg/kubelet/kubelet_volumes.go @@ -123,22 +123,22 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon // If there are still volume directories, do not delete directory volumePaths, err := kl.getPodVolumePathListFromDisk(uid) if err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err)) + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err)) continue } if len(volumePaths) > 0 { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid)) + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume paths are still present on disk", uid)) continue } // If there are any volume-subpaths, do not cleanup directories volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid) if err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err)) + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err)) continue } if volumeSubpathExists { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid)) + orphanVolumeErrors = append(orphanVolumeErrors, 
fmt.Errorf("orphaned pod %q found, but volume subpaths are still present on disk", uid)) continue } diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index 7a897f3badf..f6663a58748 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -213,7 +213,7 @@ func latestNodeConfigSource(store cache.Store, nodeName string) (*apiv1.NodeConf utillog.Errorf(err.Error()) return nil, err } else if !ok { - err := fmt.Errorf("Node %q does not exist in the informer's store, can't sync config source", nodeName) + err := fmt.Errorf("node %q does not exist in the informer's store, can't sync config source", nodeName) utillog.Errorf(err.Error()) return nil, err } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index c499bdaac85..e814ce39854 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -749,7 +749,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v status, err := m.runtimeService.ContainerStatus(containerID.ID) if err != nil { klog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err) - return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String()) + return fmt.Errorf("unable to retrieve container logs for %v", containerID.String()) } return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index af235957365..7ca444e4cdf 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -68,7 +68,7 @@ const ( var ( // ErrVersionNotSupported is returned when the api version of runtime interface is not supported - ErrVersionNotSupported = errors.New("Runtime api version is not supported") + 
ErrVersionNotSupported = errors.New("runtime api version is not supported") ) // podStateProvider can determine if a pod is deleted ir terminated @@ -842,7 +842,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil { m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container") } - err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod)) + err := fmt.Errorf("back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod)) klog.V(3).Infof("%s", err.Error()) return true, err.Error(), kubecontainer.ErrCrashLoopBackOff } diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index c84f7d9ad57..d09fa361d85 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -74,7 +74,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, } return msg, err default: - err := fmt.Errorf("Invalid handler: %v", handler) + err := fmt.Errorf("invalid handler: %v", handler) msg := fmt.Sprintf("Cannot run handler: %v", err) klog.Errorf(msg) return msg, err diff --git a/pkg/kubelet/network/dns/dns.go b/pkg/kubelet/network/dns/dns.go index a3f20b7be10..b329b848700 100644 --- a/pkg/kubelet/network/dns/dns.go +++ b/pkg/kubelet/network/dns/dns.go @@ -191,8 +191,6 @@ func (c *Configurer) CheckLimitsForResolvConf() { klog.V(4).Infof("CheckLimitsForResolvConf: " + log) return } - - return } // parseResolvConf reads a resolv.conf file from the given reader, and parses diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index bbeabe280a4..f38af7a3bbe 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -475,7 +475,7 @@ func ReadyCondition( } } if len(missingCapacities) > 0 { - errs = 
append(errs, fmt.Errorf("Missing node capacity for resources: %s", strings.Join(missingCapacities, ", "))) + errs = append(errs, fmt.Errorf("missing node capacity for resources: %s", strings.Join(missingCapacities, ", "))) } if aggregatedErr := errors.NewAggregate(errs); aggregatedErr != nil { newNodeReadyCondition = v1.NodeCondition{ diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go index 9cb2ea342e9..b0a695cbc44 100644 --- a/pkg/kubelet/nodestatus/setters_test.go +++ b/pkg/kubelet/nodestatus/setters_test.go @@ -970,7 +970,7 @@ func TestReadyCondition(t *testing.T) { { desc: "new, not ready: missing capacities", node: &v1.Node{}, - expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "Missing node capacity for resources: cpu, memory, pods, ephemeral-storage", now, now)}, + expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "missing node capacity for resources: cpu, memory, pods, ephemeral-storage", now, now)}, }, // the transition tests ensure timestamps are set correctly, no need to test the entire condition matrix in this section { diff --git a/pkg/kubelet/pluginmanager/cache/BUILD b/pkg/kubelet/pluginmanager/cache/BUILD index 0ef107b498e..0ad63ca5bac 100644 --- a/pkg/kubelet/pluginmanager/cache/BUILD +++ b/pkg/kubelet/pluginmanager/cache/BUILD @@ -19,6 +19,7 @@ go_test( "desired_state_of_world_test.go", ], embed = [":go_default_library"], + deps = ["//vendor/github.com/stretchr/testify/require:go_default_library"], ) filegroup( diff --git a/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go b/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go index c52b9805f48..b039cfdf0fe 100644 --- a/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go @@ -87,7 +87,7 @@ func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error { defer asw.Unlock() if pluginInfo.SocketPath == "" { - return fmt.Errorf("Socket path is 
empty") + return fmt.Errorf("socket path is empty") } if _, ok := asw.socketFileToInfo[pluginInfo.SocketPath]; ok { klog.V(2).Infof("Plugin (Path %s) exists in actual state cache", pluginInfo.SocketPath) diff --git a/pkg/kubelet/pluginmanager/cache/actual_state_of_world_test.go b/pkg/kubelet/pluginmanager/cache/actual_state_of_world_test.go index 8d9ba83ec49..df1f6bd6954 100644 --- a/pkg/kubelet/pluginmanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/pluginmanager/cache/actual_state_of_world_test.go @@ -19,6 +19,8 @@ package cache import ( "testing" "time" + + "github.com/stretchr/testify/require" ) // Calls AddPlugin() to add a plugin @@ -63,10 +65,7 @@ func Test_ASW_AddPlugin_Negative_EmptySocketPath(t *testing.T) { Timestamp: time.Now(), } err := asw.AddPlugin(pluginInfo) - // Assert - if err == nil || err.Error() != "Socket path is empty" { - t.Fatalf("AddOrUpdatePlugin failed. Expected: Actual: <%v>", err) - } + require.EqualError(t, err, "socket path is empty") // Get registered plugins and check the newly added plugin is there aswPlugins := asw.GetRegisteredPlugins() diff --git a/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go b/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go index 320fad456bd..1b601ba65e4 100644 --- a/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go @@ -125,7 +125,7 @@ func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string, foundInDepr defer dsw.Unlock() if socketPath == "" { - return fmt.Errorf("Socket path is empty") + return fmt.Errorf("socket path is empty") } if _, ok := dsw.socketFileToInfo[socketPath]; ok { klog.V(2).Infof("Plugin (Path %s) exists in actual state cache, timestamp will be updated", socketPath) diff --git a/pkg/kubelet/pluginmanager/cache/desired_state_of_world_test.go b/pkg/kubelet/pluginmanager/cache/desired_state_of_world_test.go index 65fed899269..3b037e6b6db 100644 --- 
a/pkg/kubelet/pluginmanager/cache/desired_state_of_world_test.go +++ b/pkg/kubelet/pluginmanager/cache/desired_state_of_world_test.go @@ -18,6 +18,8 @@ package cache import ( "testing" + + "github.com/stretchr/testify/require" ) // Calls AddOrUpdatePlugin() to add a plugin @@ -96,10 +98,7 @@ func Test_DSW_AddOrUpdatePlugin_Negative_PluginMissingInfo(t *testing.T) { dsw := NewDesiredStateOfWorld() socketPath := "" err := dsw.AddOrUpdatePlugin(socketPath, false /* foundInDeprecatedDir */) - // Assert - if err == nil || err.Error() != "Socket path is empty" { - t.Fatalf("AddOrUpdatePlugin failed. Expected: Actual: <%v>", err) - } + require.EqualError(t, err, "socket path is empty") // Get pluginsToRegister and check the newly added plugin is there dswPlugins := dsw.GetPluginsToRegister() diff --git a/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go b/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go index 0ae213dce22..a74f1b1c784 100644 --- a/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go +++ b/pkg/kubelet/pluginmanager/pluginwatcher/example_handler.go @@ -94,7 +94,7 @@ func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions [] // Verifies the grpcServer is ready to serve services. 
_, conn, err := dial(endpoint, time.Second) if err != nil { - return fmt.Errorf("Failed dialing endpoint (%s): %v", endpoint, err) + return fmt.Errorf("failed dialing endpoint (%s): %v", endpoint, err) } defer conn.Close() @@ -105,13 +105,13 @@ func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions [] // Tests v1beta1 GetExampleInfo _, err = v1beta1Client.GetExampleInfo(context.Background(), &v1beta1.ExampleRequest{}) if err != nil { - return fmt.Errorf("Failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err) + return fmt.Errorf("failed GetExampleInfo for v1beta1Client(%s): %v", endpoint, err) } // Tests v1beta1 GetExampleInfo _, err = v1beta2Client.GetExampleInfo(context.Background(), &v1beta2.ExampleRequest{}) if err != nil { - return fmt.Errorf("Failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err) + return fmt.Errorf("failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err) } return nil diff --git a/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go b/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go index be7582d5bbe..be127d77b60 100644 --- a/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go +++ b/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go @@ -138,7 +138,7 @@ func (e *examplePlugin) Serve(services ...string) error { v1beta2 := &pluginServiceV1Beta2{server: e} v1beta2.RegisterService() default: - return fmt.Errorf("Unsupported service: '%s'", service) + return fmt.Errorf("unsupported service: '%s'", service) } } @@ -169,7 +169,7 @@ func (e *examplePlugin) Stop() error { case <-c: break case <-time.After(time.Second): - return errors.New("Timed out on waiting for stop completion") + return errors.New("timed out on waiting for stop completion") } if err := os.Remove(e.endpoint); err != nil && !os.IsNotExist(err) { diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index cf22e6278b2..91ee6076efb 100644 --- a/pkg/kubelet/prober/prober.go +++ 
b/pkg/kubelet/prober/prober.go @@ -87,7 +87,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c case liveness: probeSpec = container.LivenessProbe default: - return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType) + return results.Failure, fmt.Errorf("unknown probe type: %q", probeType) } ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name) @@ -193,7 +193,7 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status return pb.tcp.Probe(host, port, timeout) } klog.Warningf("Failed to find probe builder for container: %v", container) - return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name) + return probe.Unknown, "", fmt.Errorf("missing probe handler for %s:%s", format.Pod(pod), container.Name) } func extractPort(param intstr.IntOrString, container v1.Container) (int, error) { @@ -210,7 +210,7 @@ func extractPort(param intstr.IntOrString, container v1.Container) (int, error) } } default: - return port, fmt.Errorf("IntOrString had no kind: %+v", param) + return port, fmt.Errorf("IntOrString had no kind: %+v", param) } if port > 0 && port < 65536 { return port, nil diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go index 86c1f36f0c7..f4f28a87fca 100644 --- a/pkg/kubelet/runonce.go +++ b/pkg/kubelet/runonce.go @@ -116,7 +116,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { for { status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { - return fmt.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err) + return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) } if kl.isPodRunning(pod, status) { diff --git a/pkg/kubelet/runtimeclass/runtimeclass_manager.go b/pkg/kubelet/runtimeclass/runtimeclass_manager.go index 020275dcfaf..2ab2fb045da 100644 --- a/pkg/kubelet/runtimeclass/runtimeclass_manager.go +++ 
b/pkg/kubelet/runtimeclass/runtimeclass_manager.go @@ -71,7 +71,7 @@ func (m *Manager) LookupRuntimeHandler(runtimeClassName *string) (string, error) if errors.IsNotFound(err) { return "", err } - return "", fmt.Errorf("Failed to lookup RuntimeClass %s: %v", name, err) + return "", fmt.Errorf("failed to lookup RuntimeClass %s: %v", name, err) } return rc.Handler, nil diff --git a/pkg/kubelet/server/portforward/httpstream.go b/pkg/kubelet/server/portforward/httpstream.go index df122ee9368..4b5e66d6607 100644 --- a/pkg/kubelet/server/portforward/httpstream.go +++ b/pkg/kubelet/server/portforward/httpstream.go @@ -46,7 +46,7 @@ func handleHTTPStreams(req *http.Request, w http.ResponseWriter, portForwarder P upgrader := spdy.NewResponseUpgrader() conn := upgrader.UpgradeResponse(w, req, httpStreamReceived(streamChan)) if conn == nil { - return errors.New("Unable to upgrade httpstream connection") + return errors.New("unable to upgrade httpstream connection") } defer conn.Close() diff --git a/pkg/kubelet/server/portforward/websocket.go b/pkg/kubelet/server/portforward/websocket.go index c2895658f89..69502a6997d 100644 --- a/pkg/kubelet/server/portforward/websocket.go +++ b/pkg/kubelet/server/portforward/websocket.go @@ -115,7 +115,7 @@ func handleWebSocketStreams(req *http.Request, w http.ResponseWriter, portForwar conn.SetIdleTimeout(idleTimeout) _, streams, err := conn.Open(httplog.Unlogged(req, w), req) if err != nil { - err = fmt.Errorf("Unable to upgrade websocket connection: %v", err) + err = fmt.Errorf("unable to upgrade websocket connection: %v", err) return err } defer conn.Close() diff --git a/pkg/kubelet/server/remotecommand/httpstream.go b/pkg/kubelet/server/remotecommand/httpstream.go index df5c8eee36d..833762d83f5 100644 --- a/pkg/kubelet/server/remotecommand/httpstream.go +++ b/pkg/kubelet/server/remotecommand/httpstream.go @@ -232,7 +232,7 @@ WaitForStreams: ctx.resizeStream = stream go waitStreamReply(stream.replySent, replyChan, stop) default: - 
runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType)) } case <-replyChan: receivedStreams++ @@ -283,7 +283,7 @@ WaitForStreams: ctx.resizeStream = stream go waitStreamReply(stream.replySent, replyChan, stop) default: - runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType)) } case <-replyChan: receivedStreams++ @@ -331,7 +331,7 @@ WaitForStreams: ctx.stderrStream = stream go waitStreamReply(stream.replySent, replyChan, stop) default: - runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType)) } case <-replyChan: receivedStreams++ @@ -385,7 +385,7 @@ WaitForStreams: ctx.stderrStream = stream go waitStreamReply(stream.replySent, replyChan, stop) default: - runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType)) } case <-replyChan: receivedStreams++ diff --git a/pkg/kubelet/server/remotecommand/websocket.go b/pkg/kubelet/server/remotecommand/websocket.go index 87cd012318f..4a6be372dfd 100644 --- a/pkg/kubelet/server/remotecommand/websocket.go +++ b/pkg/kubelet/server/remotecommand/websocket.go @@ -97,7 +97,7 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Opti conn.SetIdleTimeout(idleTimeout) negotiatedProtocol, streams, err := conn.Open(httplog.Unlogged(req, w), req) if err != nil { - runtime.HandleError(fmt.Errorf("Unable to upgrade websocket connection: %v", err)) + runtime.HandleError(fmt.Errorf("unable to upgrade websocket connection: %v", err)) return nil, false } diff --git a/pkg/kubelet/util/util_unix.go b/pkg/kubelet/util/util_unix.go index 8dbc1399e31..12f7ce64ee4 100644 --- a/pkg/kubelet/util/util_unix.go +++ b/pkg/kubelet/util/util_unix.go @@ -120,7 +120,7 
@@ func parseEndpoint(endpoint string) (string, string, error) { return "unix", u.Path, nil case "": - return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint) + return "", "", fmt.Errorf("using %q as endpoint is deprecated, please consider using full url format", endpoint) default: return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme) diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index 34ac108734f..4a63db4b864 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -97,7 +97,7 @@ func NewInitializedVolumePluginMgr( if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil { return nil, fmt.Errorf( - "Could not initialize volume plugins for KubeletVolumePluginMgr: %v", + "could not initialize volume plugins for KubeletVolumePluginMgr: %v", err) } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 6fcdcb1b274..e5d1d5ea235 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -470,7 +470,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, } // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock && mapperPlugin == nil { - return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID) + return nil, fmt.Errorf("could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID) } volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation( @@ -543,7 +543,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, } // If mount or symlink doesn't 
exist, volume reconstruction should be failed if !isExist { - return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName) + return nil, fmt.Errorf("volume: %q is not mounted", uniqueVolumeName) } reconstructedVolume := &reconstructedVolume{