Clean up error messages (ST1005)
commit 8a495cb5e4
parent e06912ca3e
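ST1005 is staticcheck's style check for error strings: because Go errors are routinely wrapped into longer messages ("sync pod: ..."), the string should start lower-case and carry no trailing punctuation or newline. A minimal before/after sketch (the function names, message, and path are invented for illustration):

    package example

    import "fmt"

    // Flagged by ST1005: capitalized, ends with punctuation.
    func openBefore(path string) error {
    	return fmt.Errorf("Unable to open %s.", path)
    }

    // Passes ST1005: lower-case, no trailing punctuation, so it reads
    // naturally when wrapped, e.g. "sync failed: unable to open /etc/foo".
    func openAfter(path string) error {
    	return fmt.Errorf("unable to open %s", path)
    }

The hunks below apply exactly this rewording across the kubelet tree; one string that must stay user-facing is suppressed with a lint directive instead (see the checkpoint-state hunk).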
@@ -51,7 +51,7 @@ func newActiveDeadlineHandler(
 
 	// check for all required fields
 	if clock == nil || podStatusProvider == nil || recorder == nil {
-		return nil, fmt.Errorf("Required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder)
+		return nil, fmt.Errorf("required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder)
	}
	return &activeDeadlineHandler{
		clock: clock,
@@ -38,7 +38,7 @@ func GetClient(socket string, connectionTimeout time.Duration, maxMsgSize int) (
 
 	conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
 	if err != nil {
-		return nil, nil, fmt.Errorf("Error dialing socket %s: %v", socket, err)
+		return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err)
 	}
 	return podresourcesapi.NewPodResourcesListerClient(conn), conn, nil
 }
@@ -235,32 +235,32 @@ func isClientConfigStillValid(kubeconfigPath string) (bool, error) {
 	}
 	bootstrapClientConfig, err := loadRESTClientConfig(kubeconfigPath)
 	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("Unable to read existing bootstrap client config: %v", err))
+		utilruntime.HandleError(fmt.Errorf("unable to read existing bootstrap client config: %v", err))
 		return false, nil
 	}
 	transportConfig, err := bootstrapClientConfig.TransportConfig()
 	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("Unable to load transport configuration from existing bootstrap client config: %v", err))
+		utilruntime.HandleError(fmt.Errorf("unable to load transport configuration from existing bootstrap client config: %v", err))
 		return false, nil
 	}
 	// has side effect of populating transport config data fields
 	if _, err := transport.TLSConfigFor(transportConfig); err != nil {
-		utilruntime.HandleError(fmt.Errorf("Unable to load TLS configuration from existing bootstrap client config: %v", err))
+		utilruntime.HandleError(fmt.Errorf("unable to load TLS configuration from existing bootstrap client config: %v", err))
 		return false, nil
 	}
 	certs, err := certutil.ParseCertsPEM(transportConfig.TLS.CertData)
 	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("Unable to load TLS certificates from existing bootstrap client config: %v", err))
+		utilruntime.HandleError(fmt.Errorf("unable to load TLS certificates from existing bootstrap client config: %v", err))
 		return false, nil
 	}
 	if len(certs) == 0 {
-		utilruntime.HandleError(fmt.Errorf("Unable to read TLS certificates from existing bootstrap client config: %v", err))
+		utilruntime.HandleError(fmt.Errorf("unable to read TLS certificates from existing bootstrap client config: %v", err))
 		return false, nil
 	}
 	now := time.Now()
 	for _, cert := range certs {
 		if now.After(cert.NotAfter) {
-			utilruntime.HandleError(fmt.Errorf("Part of the existing bootstrap client certificate is expired: %s", cert.NotAfter))
+			utilruntime.HandleError(fmt.Errorf("part of the existing bootstrap client certificate is expired: %s", cert.NotAfter))
 			return false, nil
 		}
 	}
@@ -303,7 +303,7 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
 
 	// Delete cgroups using libcontainers Managers Destroy() method
 	if err = manager.Destroy(); err != nil {
-		return fmt.Errorf("Unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, err)
+		return fmt.Errorf("unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, err)
 	}
 
 	return nil
@@ -346,14 +346,14 @@ func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error {
 	for sys, required := range getSupportedSubsystems() {
 		if _, ok := cgroupConfig.Paths[sys.Name()]; !ok {
 			if required {
-				return fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name())
+				return fmt.Errorf("failed to find subsystem mount for required subsystem: %v", sys.Name())
 			}
 			// the cgroup is not mounted, but its not required so continue...
 			klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
 			continue
 		}
 		if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil {
-			return fmt.Errorf("Failed to set config for supported subsystems : %v", err)
+			return fmt.Errorf("failed to set config for supported subsystems : %v", err)
 		}
 	}
 	return nil
@@ -560,14 +560,14 @@ func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercg
 	for sys, required := range getSupportedSubsystems() {
 		if _, ok := cgroupPaths[sys.Name()]; !ok {
 			if required {
-				return nil, fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name())
+				return nil, fmt.Errorf("failed to find subsystem mount for required subsystem: %v", sys.Name())
 			}
 			// the cgroup is not mounted, but its not required so continue...
 			klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name())
 			continue
 		}
 		if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil {
-			return nil, fmt.Errorf("Failed to get stats for supported subsystems : %v", err)
+			return nil, fmt.Errorf("failed to get stats for supported subsystems : %v", err)
 		}
 	}
 	return stats, nil
@@ -214,7 +214,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 		// If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should
 		// error out unless --fail-swap-on is set to false.
 		if len(swapLines) > 1 {
-			return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines)
+			return nil, fmt.Errorf("running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines)
 		}
 	}
 
@@ -395,7 +395,7 @@ func setupKernelTunables(option KernelTunableBehavior) error {
 
 		switch option {
 		case KernelTunableError:
-			errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
+			errList = append(errList, fmt.Errorf("invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
 		case KernelTunableWarn:
 			klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
 		case KernelTunableModify:
@@ -51,6 +51,7 @@ func NewCheckpointState(stateDir, checkpointName, policyName string) (State, err
 	}
 
 	if err := stateCheckpoint.restoreState(); err != nil {
+		//lint:ignore ST1005 user-facing error message
 		return nil, fmt.Errorf("could not restore state from checkpoint: %v\n"+
 			"Please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet.",
 			err, path.Join(stateDir, checkpointName))
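The directive added above is staticcheck's standard suppression mechanism: `//lint:ignore <checks> <reason>` placed on the line immediately before the statement that would otherwise be flagged. A hypothetical sketch of the same pattern in isolation (the function and message are invented; only the directive syntax is the point):

    package example

    import "errors"

    // restoreState is illustrative; the directive keeps staticcheck from
    // flagging the deliberately user-facing, capitalized message.
    func restoreState() error {
    	//lint:ignore ST1005 user-facing error message
    	return errors.New("Please drain this node before restarting Kubelet.")
    }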
@@ -276,12 +276,12 @@ func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, version
 
 	e, err := newEndpointImpl(endpoint, pluginName, m.callback)
 	if err != nil {
-		return fmt.Errorf("Failed to dial device plugin with socketPath %s: %v", endpoint, err)
+		return fmt.Errorf("failed to dial device plugin with socketPath %s: %v", endpoint, err)
 	}
 
 	options, err := e.client.GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
 	if err != nil {
-		return fmt.Errorf("Failed to get device plugin options: %v", err)
+		return fmt.Errorf("failed to get device plugin options: %v", err)
 	}
 
 	m.registerEndpoint(pluginName, options, e)
@@ -697,7 +697,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 			m.mutex.Lock()
 			m.allocatedDevices = m.podDevices.devices()
 			m.mutex.Unlock()
-			return fmt.Errorf("Unknown Device Plugin %s", resource)
+			return fmt.Errorf("unknown Device Plugin %s", resource)
 		}
 
 		devs := allocDevices.UnsortedList()
@@ -717,7 +717,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 		}
 
 		if len(resp.ContainerResponses) == 0 {
-			return fmt.Errorf("No containers return in allocation response %v", resp)
+			return fmt.Errorf("no containers return in allocation response %v", resp)
 		}
 
 		// Update internal cached podDevices state.
@@ -237,7 +237,7 @@ func (cm *containerManagerImpl) validateNodeAllocatable() error {
 	}
 
 	if len(errors) > 0 {
-		return fmt.Errorf("Invalid Node Allocatable configuration. %s", strings.Join(errors, " "))
+		return fmt.Errorf("invalid Node Allocatable configuration. %s", strings.Join(errors, " "))
 	}
 	return nil
 }
@@ -138,6 +138,6 @@ func (s *sourceURL) extractFromURL() error {
 	}
 
 	return fmt.Errorf("%v: received '%v', but couldn't parse as "+
-		"single (%v) or multiple pods (%v).\n",
+		"single (%v) or multiple pods (%v)",
 		s.url, string(data), singlePodErr, multiPodErr)
 }
@@ -116,11 +116,11 @@ func (p *PodSyncResult) Fail(err error) {
 func (p *PodSyncResult) Error() error {
 	errlist := []error{}
 	if p.SyncError != nil {
-		errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v\n", p.SyncError))
+		errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v", p.SyncError))
 	}
 	for _, result := range p.SyncResults {
 		if result.Error != nil {
-			errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q\n", result.Action, result.Target,
+			errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q", result.Action, result.Target,
 				result.Error, result.Message))
 		}
 	}
@@ -527,7 +527,7 @@ func (ds *dockerService) getDockerVersionFromCache() (*dockertypes.Version, erro
 	}
 	dv, ok := value.(*dockertypes.Version)
 	if !ok {
-		return nil, fmt.Errorf("Converted to *dockertype.Version error")
+		return nil, fmt.Errorf("converted to *dockertype.Version error")
 	}
 	return dv, nil
 }
@@ -41,14 +41,14 @@ func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream i
 	containerPid := container.State.Pid
 	socatPath, lookupErr := exec.LookPath("socat")
 	if lookupErr != nil {
-		return fmt.Errorf("unable to do port forwarding: socat not found.")
+		return fmt.Errorf("unable to do port forwarding: socat not found")
 	}
 
 	args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)}
 
 	nsenterPath, lookupErr := exec.LookPath("nsenter")
 	if lookupErr != nil {
-		return fmt.Errorf("unable to do port forwarding: nsenter not found.")
+		return fmt.Errorf("unable to do port forwarding: nsenter not found")
 	}
 
 	commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " "))
@@ -66,7 +66,7 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin
 		return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
 	}
 	if netnsPath == "" {
-		return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id)
+		return nil, fmt.Errorf("cannot find the network namespace, skipping pod network status for container %q", id)
 	}
 
 	ips, err := network.GetPodIPs(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
@@ -53,12 +53,12 @@ func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceN
 	nsenterArgs = append(nsenterArgs, "-F", "--", ethtoolPath, "--statistics", containerInterfaceName)
 	output, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput()
 	if err != nil {
-		return "", fmt.Errorf("Unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output))
+		return "", fmt.Errorf("unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output))
 	}
 	// look for peer_ifindex
 	match := ethtoolOutputRegex.FindSubmatch(output)
 	if match == nil {
-		return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc)
+		return "", fmt.Errorf("no peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc)
 	}
 	peerIfIndex, err := strconv.Atoi(string(match[1]))
 	if err != nil { // seems impossible (\d+ not numeric)
@@ -59,7 +59,7 @@ func (f *fakeIPTables) GetVersion() (string, error) {
 func (f *fakeIPTables) getTable(tableName utiliptables.Table) (*fakeTable, error) {
 	table, ok := f.tables[string(tableName)]
 	if !ok {
-		return nil, fmt.Errorf("Table %s does not exist", tableName)
+		return nil, fmt.Errorf("table %s does not exist", tableName)
 	}
 	return table, nil
 }
@@ -72,7 +72,7 @@ func (f *fakeIPTables) getChain(tableName utiliptables.Table, chainName utilipta
 
 	chain, ok := table.chains[string(chainName)]
 	if !ok {
-		return table, nil, fmt.Errorf("Chain %s/%s does not exist", tableName, chainName)
+		return table, nil, fmt.Errorf("chain %s/%s does not exist", tableName, chainName)
 	}
 
 	return table, chain, nil
@@ -152,7 +152,7 @@ func (f *fakeIPTables) ensureRule(position utiliptables.RulePosition, tableName
 	} else if position == utiliptables.Append {
 		chain.rules = append(chain.rules, rule)
 	} else {
-		return false, fmt.Errorf("Unknown position argument %q", position)
+		return false, fmt.Errorf("unknown position argument %q", position)
 	}
 
 	return false, nil
@@ -171,7 +171,7 @@ func normalizeRule(rule string) (string, error) {
 		if remaining[0] == '"' {
 			end = strings.Index(remaining[1:], "\"")
 			if end < 0 {
-				return "", fmt.Errorf("Invalid rule syntax: mismatched quotes")
+				return "", fmt.Errorf("invalid rule syntax: mismatched quotes")
 			}
 			end += 2
 		} else {
@@ -292,7 +292,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte,
 		} else if strings.HasPrefix(line, "-A") {
 			parts := strings.Split(line, " ")
 			if len(parts) < 3 {
-				return fmt.Errorf("Invalid iptables rule '%s'", line)
+				return fmt.Errorf("invalid iptables rule '%s'", line)
 			}
 			chainName := utiliptables.Chain(parts[1])
 			rule := strings.TrimPrefix(line, fmt.Sprintf("-A %s ", chainName))
@@ -303,7 +303,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte,
 		} else if strings.HasPrefix(line, "-I") {
 			parts := strings.Split(line, " ")
 			if len(parts) < 3 {
-				return fmt.Errorf("Invalid iptables rule '%s'", line)
+				return fmt.Errorf("invalid iptables rule '%s'", line)
 			}
 			chainName := utiliptables.Chain(parts[1])
 			rule := strings.TrimPrefix(line, fmt.Sprintf("-I %s ", chainName))
@@ -314,7 +314,7 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte,
 		} else if strings.HasPrefix(line, "-X") {
 			parts := strings.Split(line, " ")
 			if len(parts) < 2 {
-				return fmt.Errorf("Invalid iptables rule '%s'", line)
+				return fmt.Errorf("invalid iptables rule '%s'", line)
 			}
 			if err := f.DeleteChain(tableName, utiliptables.Chain(parts[1])); err != nil {
 				return err
@@ -114,7 +114,7 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName
 	klog.V(4).Info("Ensuring kubelet hostport chains")
 	// Ensure kubeHostportChain
 	if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil {
-		return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err)
+		return fmt.Errorf("failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err)
 	}
 	tableChainsNeedJumpServices := []struct {
 		table utiliptables.Table
@@ -131,14 +131,14 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName
 		// This ensures KUBE-SERVICES chain gets processed first.
 		// Since rules in KUBE-HOSTPORTS chain matches broader cases, allow the more specific rules to be processed first.
 		if _, err := iptables.EnsureRule(utiliptables.Append, tc.table, tc.chain, args...); err != nil {
-			return fmt.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err)
+			return fmt.Errorf("failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err)
 		}
 	}
 	if natInterfaceName != "" && natInterfaceName != "lo" {
 		// Need to SNAT traffic from localhost
 		args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"}
 		if _, err := iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
-			return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
+			return fmt.Errorf("failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
 		}
 	}
 	return nil
@@ -249,7 +249,7 @@ func (hm *hostportManager) syncIPTables(lines []byte) error {
 	klog.V(3).Infof("Restoring iptables rules: %s", lines)
 	err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
 	if err != nil {
-		return fmt.Errorf("Failed to execute iptables-restore: %v", err)
+		return fmt.Errorf("failed to execute iptables-restore: %v", err)
 	}
 	return nil
 }
@@ -123,7 +123,7 @@ func gatherAllHostports(activePodPortMappings []*PodPortMapping) (map[*PortMappi
 	podHostportMap := make(map[*PortMapping]targetPod)
 	for _, pm := range activePodPortMappings {
 		if pm.IP.To4() == nil {
-			return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm))
+			return nil, fmt.Errorf("invalid or missing pod %s IP", getPodFullName(pm))
 		}
 		// should not handle hostports for hostnetwork pods
 		if pm.HostNetwork {
@@ -286,7 +286,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap
 	klog.V(3).Infof("Restoring iptables rules: %s", natLines)
 	err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
 	if err != nil {
-		return fmt.Errorf("Failed to execute iptables-restore: %v", err)
+		return fmt.Errorf("failed to execute iptables-restore: %v", err)
 	}
 
 	h.cleanupHostportMap(hostportPodMap)
@@ -35,7 +35,7 @@ func (h *fakeSyncer) OpenPodHostportsAndSync(newPortMapping *hostport.PodPortMap
 func (h *fakeSyncer) SyncHostports(natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error {
 	for _, r := range activePortMapping {
 		if r.IP.To4() == nil {
-			return fmt.Errorf("Invalid or missing pod %s/%s IP", r.Namespace, r.Name)
+			return fmt.Errorf("invalid or missing pod %s/%s IP", r.Namespace, r.Name)
 		}
 	}
 
@@ -180,12 +180,12 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc
   "type": "loopback"
 }`))
 	if err != nil {
-		return fmt.Errorf("Failed to generate loopback config: %v", err)
+		return fmt.Errorf("failed to generate loopback config: %v", err)
 	}
 
 	plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
 	if err != nil {
-		return fmt.Errorf("Failed to find nsenter binary: %v", err)
+		return fmt.Errorf("failed to find nsenter binary: %v", err)
 	}
 
 	// Need to SNAT outbound traffic from cluster
@@ -209,7 +209,7 @@ func (plugin *kubenetNetworkPlugin) ensureMasqRule() error {
 			"-m", "addrtype", "!", "--dst-type", "LOCAL",
 			"!", "-d", plugin.nonMasqueradeCIDR,
 			"-j", "MASQUERADE"); err != nil {
-			return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
+			return fmt.Errorf("failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
 		}
 	}
 	return nil
@@ -381,7 +381,7 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
 		// promiscuous mode is not on, then turn it on.
 		err := netlink.SetPromiscOn(link)
 		if err != nil {
-			return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err)
+			return fmt.Errorf("error setting promiscuous mode on %s: %v", BridgeName, err)
 		}
 	}
 
@@ -410,7 +410,7 @@ func (plugin *kubenetNetworkPlugin) addTrafficShaping(id kubecontainer.Container
 	shaper := plugin.shaper()
 	ingress, egress, err := bandwidth.ExtractPodBandwidthResources(annotations)
 	if err != nil {
-		return fmt.Errorf("Error reading pod bandwidth annotations: %v", err)
+		return fmt.Errorf("error reading pod bandwidth annotations: %v", err)
 	}
 	iplist, exists := plugin.getCachedPodIPs(id)
 	if !exists {
@@ -428,7 +428,7 @@ func (plugin *kubenetNetworkPlugin) addTrafficShaping(id kubecontainer.Container
 		}
 
 		if err := shaper.ReconcileCIDR(fmt.Sprintf("%v/%v", ip, mask), egress, ingress); err != nil {
-			return fmt.Errorf("Failed to add pod to shaper: %v", err)
+			return fmt.Errorf("failed to add pod to shaper: %v", err)
 		}
 	}
 }
@@ -471,7 +471,7 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k
 	start := time.Now()
 
 	if err := plugin.Status(); err != nil {
-		return fmt.Errorf("Kubenet cannot SetUpPod: %v", err)
+		return fmt.Errorf("kubenet cannot SetUpPod: %v", err)
 	}
 
 	defer func() {
@@ -550,7 +550,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i
 	}()
 
 	if plugin.netConfig == nil {
-		return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods")
+		return fmt.Errorf("kubenet needs a PodCIDR to tear down pods")
 	}
 
 	if err := plugin.teardown(namespace, name, id); err != nil {
@@ -576,10 +576,10 @@ func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name s
 	// not a cached version, get via network ns
 	netnsPath, err := plugin.host.GetNetNS(id.ID)
 	if err != nil {
-		return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
+		return nil, fmt.Errorf("kubenet failed to retrieve network namespace path: %v", err)
 	}
 	if netnsPath == "" {
-		return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id)
+		return nil, fmt.Errorf("cannot find the network namespace, skipping pod network status for container %q", id)
 	}
 	ips, err := network.GetPodIPs(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
 	if err != nil {
@@ -626,7 +626,7 @@ func (plugin *kubenetNetworkPlugin) getNetworkStatus(id kubecontainer.ContainerI
 func (plugin *kubenetNetworkPlugin) Status() error {
 	// Can't set up pods if we don't have a PodCIDR yet
 	if plugin.netConfig == nil {
-		return fmt.Errorf("Kubenet does not have netConfig. This is most likely due to lack of PodCIDR")
+		return fmt.Errorf("kubenet does not have netConfig. This is most likely due to lack of PodCIDR")
 	}
 
 	if !plugin.checkRequiredCNIPlugins() {
@@ -683,14 +683,14 @@ func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubeco
 func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) (cnitypes.Result, error) {
 	rt, err := plugin.buildCNIRuntimeConf(ifName, id, true)
 	if err != nil {
-		return nil, fmt.Errorf("Error building CNI config: %v", err)
+		return nil, fmt.Errorf("error building CNI config: %v", err)
 	}
 
 	klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
 
 	res, err := plugin.cniConfig.AddNetwork(context.TODO(), config, rt)
 	if err != nil {
-		return nil, fmt.Errorf("Error adding container to network: %v", err)
+		return nil, fmt.Errorf("error adding container to network: %v", err)
 	}
 	return res, nil
 }
@@ -698,7 +698,7 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network
 func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) error {
 	rt, err := plugin.buildCNIRuntimeConf(ifName, id, false)
 	if err != nil {
-		return fmt.Errorf("Error building CNI config: %v", err)
+		return fmt.Errorf("error building CNI config: %v", err)
 	}
 
 	klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
@@ -706,7 +706,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo
 	// The pod may not get deleted successfully at the first time.
 	// Ignore "no such file or directory" error in case the network has already been deleted in previous attempts.
 	if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
-		return fmt.Errorf("Error removing container from network: %v", err)
+		return fmt.Errorf("error removing container from network: %v", err)
 	}
 	return nil
 }
@@ -791,20 +791,20 @@ func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.Contain
 
 	sysctlBin, err := plugin.execer.LookPath("sysctl")
 	if err != nil {
-		return fmt.Errorf("Could not find sysctl binary: %s", err)
+		return fmt.Errorf("could not find sysctl binary: %s", err)
 	}
 
 	netnsPath, err := plugin.host.GetNetNS(id.ID)
 	if err != nil {
-		return fmt.Errorf("Failed to get netns: %v", err)
+		return fmt.Errorf("failed to get netns: %v", err)
 	}
 	if netnsPath == "" {
-		return fmt.Errorf("Pod has no network namespace")
+		return fmt.Errorf("pod has no network namespace")
 	}
 
 	// If the sysctl doesn't exist, it means ipv6 is disabled; log and move on
 	if _, err := plugin.sysctl.GetSysctl(key); err != nil {
-		return fmt.Errorf("Ipv6 not enabled: %v", err)
+		return fmt.Errorf("ipv6 not enabled: %v", err)
 	}
 
 	output, err := plugin.execer.Command(plugin.nsenterPath,
@@ -812,7 +812,7 @@ func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.Contain
 		sysctlBin, "-w", fmt.Sprintf("%s=%s", key, "0"),
 	).CombinedOutput()
 	if err != nil {
-		return fmt.Errorf("Failed to write sysctl: output: %s error: %s",
+		return fmt.Errorf("failed to write sysctl: output: %s error: %s",
 			output, err)
 	}
 	return nil
@@ -161,12 +161,12 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H
 	if chosenPlugin != nil {
 		err := chosenPlugin.Init(host, hairpinMode, nonMasqueradeCIDR, mtu)
 		if err != nil {
-			allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err))
+			allErrs = append(allErrs, fmt.Errorf("network plugin %q failed init: %v", networkPluginName, err))
 		} else {
 			klog.V(1).Infof("Loaded network plugin %q", networkPluginName)
 		}
 	} else {
-		allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName))
+		allErrs = append(allErrs, fmt.Errorf("network plugin %q not found", networkPluginName))
 	}
 
 	return chosenPlugin, utilerrors.NewAggregate(allErrs)
@@ -235,16 +235,16 @@ func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceNam
 	output, err := execer.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
 		"ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput()
 	if err != nil {
-		return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err)
+		return nil, fmt.Errorf("unexpected command output %s with error: %v", output, err)
 	}
 
 	lines := strings.Split(string(output), "\n")
 	if len(lines) < 1 {
-		return nil, fmt.Errorf("Unexpected command output %s", output)
+		return nil, fmt.Errorf("unexpected command output %s", output)
 	}
 	fields := strings.Fields(lines[0])
 	if len(fields) < 4 {
-		return nil, fmt.Errorf("Unexpected address output %s ", lines[0])
+		return nil, fmt.Errorf("unexpected address output %s ", lines[0])
 	}
 	ip, _, err := net.ParseCIDR(fields[3])
 	if err != nil {
@@ -390,7 +390,7 @@ func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id ku
 
 	netStatus, err := pm.plugin.GetPodNetworkStatus(podNamespace, podName, id)
 	if err != nil {
-		return nil, fmt.Errorf("NetworkPlugin %s failed on the status hook for pod %q: %v", pm.plugin.Name(), fullPodName, err)
+		return nil, fmt.Errorf("networkPlugin %s failed on the status hook for pod %q: %v", pm.plugin.Name(), fullPodName, err)
 	}
 
 	return netStatus, nil
@@ -404,7 +404,7 @@ func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer
 
 	klog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName)
 	if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations, options); err != nil {
-		return fmt.Errorf("NetworkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err)
+		return fmt.Errorf("networkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err)
 	}
 
 	return nil
@@ -418,7 +418,7 @@ func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontai
 
 	klog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName)
 	if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil {
-		return fmt.Errorf("NetworkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err)
+		return fmt.Errorf("networkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err)
 	}
 
 	return nil
@@ -90,7 +90,7 @@ func modifyContainerConfig(sc *runtimeapi.LinuxContainerSecurityContext, config
 	user := config.User
 	if sc.RunAsGroup != nil {
 		if user == "" {
-			return fmt.Errorf("runAsGroup is specified without a runAsUser.")
+			return fmt.Errorf("runAsGroup is specified without a runAsUser")
 		}
 		user = fmt.Sprintf("%s:%d", config.User, sc.GetRunAsGroup().Value)
 	}
@@ -287,7 +287,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
 	podVolDir := kl.getPodVolumesDir(podUID)
 
 	if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
-		return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
+		return volumes, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
 	} else if !pathExists {
 		klog.Warningf("Path %q does not exist", podVolDir)
 		return volumes, nil
@@ -303,7 +303,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
 		volumePluginPath := filepath.Join(podVolDir, volumePluginName)
 		volumeDirs, err := utilpath.ReadDirNoStat(volumePluginPath)
 		if err != nil {
-			return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err)
+			return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err)
 		}
 		for _, volumeDir := range volumeDirs {
 			volumes = append(volumes, filepath.Join(volumePluginPath, volumeDir))
@@ -336,7 +336,7 @@ func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
 	podVolDir := kl.getPodVolumeSubpathsDir(podUID)
 
 	if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
-		return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
+		return true, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
 	} else if !pathExists {
 		return false, nil
 	}
@@ -594,7 +594,7 @@ func validateNodeIP(nodeIP net.IP) error {
 			return nil
 		}
 	}
-	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String())
+	return fmt.Errorf("node IP: %q not found in the host's network interfaces", nodeIP.String())
 }
 
 // nodeStatusHasChanged compares the original node and current node's status and
@@ -403,7 +403,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, er
 	hostname := pod.Name
 	if len(pod.Spec.Hostname) > 0 {
 		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
-			return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
+			return "", "", fmt.Errorf("pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
 		}
 		hostname = pod.Spec.Hostname
 	}
@@ -416,7 +416,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, er
 	hostDomain := ""
 	if len(pod.Spec.Subdomain) > 0 {
 		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
-			return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
+			return "", "", fmt.Errorf("pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
 		}
 		hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
 	}
@@ -581,7 +581,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				configMap, ok := configMaps[name]
 				if !ok {
 					if kl.kubeClient == nil {
-						return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
+						return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
 					}
 					optional := cm.Optional != nil && *cm.Optional
 					configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
@@ -616,7 +616,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				secret, ok := secrets[name]
 				if !ok {
 					if kl.kubeClient == nil {
-						return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
+						return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
 					}
 					optional := s.Optional != nil && *s.Optional
 					secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
@@ -690,7 +690,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 			configMap, ok := configMaps[name]
 			if !ok {
 				if kl.kubeClient == nil {
-					return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
+					return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
 				}
 				configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
 				if err != nil {
@@ -707,7 +707,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				if optional {
 					continue
 				}
-				return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
+				return result, fmt.Errorf("couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
 			}
 		case envVar.ValueFrom.SecretKeyRef != nil:
 			s := envVar.ValueFrom.SecretKeyRef
@@ -717,7 +717,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 			secret, ok := secrets[name]
 			if !ok {
 				if kl.kubeClient == nil {
-					return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
+					return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
 				}
 				secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
 				if err != nil {
@@ -734,7 +734,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				if optional {
 					continue
 				}
-				return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
+				return result, fmt.Errorf("couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
 			}
 			runtimeVal = string(runtimeValBytes)
 		}
|
@ -123,22 +123,22 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
|||||||
// If there are still volume directories, do not delete directory
|
// If there are still volume directories, do not delete directory
|
||||||
volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
|
volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
|
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(volumePaths) > 0 {
|
if len(volumePaths) > 0 {
|
||||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
|
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume paths are still present on disk", uid))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there are any volume-subpaths, do not cleanup directories
|
// If there are any volume-subpaths, do not cleanup directories
|
||||||
volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
|
volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
|
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if volumeSubpathExists {
|
if volumeSubpathExists {
|
||||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid))
|
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but volume subpaths are still present on disk", uid))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -213,7 +213,7 @@ func latestNodeConfigSource(store cache.Store, nodeName string) (*apiv1.NodeConf
 		utillog.Errorf(err.Error())
 		return nil, err
 	} else if !ok {
-		err := fmt.Errorf("Node %q does not exist in the informer's store, can't sync config source", nodeName)
+		err := fmt.Errorf("node %q does not exist in the informer's store, can't sync config source", nodeName)
 		utillog.Errorf(err.Error())
 		return nil, err
 	}
@@ -749,7 +749,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v
 	status, err := m.runtimeService.ContainerStatus(containerID.ID)
 	if err != nil {
 		klog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err)
-		return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String())
+		return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
 	}
 	return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
 }
@ -68,7 +68,7 @@ const (

 var (
     // ErrVersionNotSupported is returned when the api version of runtime interface is not supported
-    ErrVersionNotSupported = errors.New("Runtime api version is not supported")
+    ErrVersionNotSupported = errors.New("runtime api version is not supported")
 )

 // podStateProvider can determine if a pod is deleted or terminated
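
ErrVersionNotSupported is a sentinel error, a package-level value callers compare against, and ST1005 applies to it too since it is usually wrapped before being logged. A small sketch, with checkVersion as a hypothetical caller, showing errors.Is still matching through %w wrapping:

package main

import (
	"errors"
	"fmt"
)

var ErrVersionNotSupported = errors.New("runtime api version is not supported")

// checkVersion is a hypothetical caller that wraps the sentinel.
func checkVersion(v string) error {
	if v != "v1alpha2" {
		return fmt.Errorf("validating runtime %q: %w", v, ErrVersionNotSupported)
	}
	return nil
}

func main() {
	err := checkVersion("v1alpha1")
	fmt.Println(errors.Is(err, ErrVersionNotSupported)) // true: %w keeps the sentinel detectable
	fmt.Println(err)
}
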
@ -842,7 +842,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
     if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
         m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
     }
-    err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
+    err := fmt.Errorf("back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
     klog.V(3).Infof("%s", err.Error())
     return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
 }
@ -74,7 +74,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod,
         }
         return msg, err
     default:
-        err := fmt.Errorf("Invalid handler: %v", handler)
+        err := fmt.Errorf("invalid handler: %v", handler)
         msg := fmt.Sprintf("Cannot run handler: %v", err)
         klog.Errorf(msg)
         return msg, err
@ -191,8 +191,6 @@ func (c *Configurer) CheckLimitsForResolvConf() {
         klog.V(4).Infof("CheckLimitsForResolvConf: " + log)
         return
     }
-
-    return
 }

 // parseResolvConf reads a resolv.conf file from the given reader, and parses
@ -475,7 +475,7 @@ func ReadyCondition(
         }
     }
     if len(missingCapacities) > 0 {
-        errs = append(errs, fmt.Errorf("Missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
+        errs = append(errs, fmt.Errorf("missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
     }
     if aggregatedErr := errors.NewAggregate(errs); aggregatedErr != nil {
         newNodeReadyCondition = v1.NodeCondition{
@ -970,7 +970,7 @@ func TestReadyCondition(t *testing.T) {
     {
         desc: "new, not ready: missing capacities",
         node: &v1.Node{},
-        expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "Missing node capacity for resources: cpu, memory, pods, ephemeral-storage", now, now)},
+        expectConditions: []v1.NodeCondition{*makeReadyCondition(false, "missing node capacity for resources: cpu, memory, pods, ephemeral-storage", now, now)},
     },
     // the transition tests ensure timestamps are set correctly, no need to test the entire condition matrix in this section
     {
pkg/kubelet/pluginmanager/cache/BUILD
@ -19,6 +19,7 @@ go_test(
         "desired_state_of_world_test.go",
     ],
     embed = [":go_default_library"],
+    deps = ["//vendor/github.com/stretchr/testify/require:go_default_library"],
 )

 filegroup(
@ -87,7 +87,7 @@ func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error {
     defer asw.Unlock()

     if pluginInfo.SocketPath == "" {
-        return fmt.Errorf("Socket path is empty")
+        return fmt.Errorf("socket path is empty")
     }
     if _, ok := asw.socketFileToInfo[pluginInfo.SocketPath]; ok {
         klog.V(2).Infof("Plugin (Path %s) exists in actual state cache", pluginInfo.SocketPath)
@ -19,6 +19,8 @@ package cache
 import (
     "testing"
     "time"
+
+    "github.com/stretchr/testify/require"
 )

 // Calls AddPlugin() to add a plugin
@ -63,10 +65,7 @@ func Test_ASW_AddPlugin_Negative_EmptySocketPath(t *testing.T) {
         Timestamp: time.Now(),
     }
     err := asw.AddPlugin(pluginInfo)
-    // Assert
-    if err == nil || err.Error() != "Socket path is empty" {
-        t.Fatalf("AddOrUpdatePlugin failed. Expected: <Socket path is empty> Actual: <%v>", err)
-    }
+    require.EqualError(t, err, "socket path is empty")

     // Get registered plugins and check the newly added plugin is there
     aswPlugins := asw.GetRegisteredPlugins()
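
Besides the message change, this hunk replaces a hand-rolled nil check plus string comparison with testify's require.EqualError, which fails the test unless the error is non-nil and its message matches exactly. A minimal sketch; addPlugin is a hypothetical stand-in for the cache's AddPlugin, and this would live in a _test.go file:

package cache

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// addPlugin is a hypothetical stand-in for the real AddPlugin.
func addPlugin(socketPath string) error {
	if socketPath == "" {
		return fmt.Errorf("socket path is empty")
	}
	return nil
}

func TestAddPluginEmptySocketPath(t *testing.T) {
	err := addPlugin("")
	// Fails (and stops the test) unless err is non-nil with exactly this message.
	require.EqualError(t, err, "socket path is empty")
}
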
@ -125,7 +125,7 @@ func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string, foundInDepr
     defer dsw.Unlock()

     if socketPath == "" {
-        return fmt.Errorf("Socket path is empty")
+        return fmt.Errorf("socket path is empty")
     }
     if _, ok := dsw.socketFileToInfo[socketPath]; ok {
         klog.V(2).Infof("Plugin (Path %s) exists in actual state cache, timestamp will be updated", socketPath)
@ -18,6 +18,8 @@ package cache

 import (
     "testing"
+
+    "github.com/stretchr/testify/require"
 )

 // Calls AddOrUpdatePlugin() to add a plugin
@ -96,10 +98,7 @@ func Test_DSW_AddOrUpdatePlugin_Negative_PluginMissingInfo(t *testing.T) {
     dsw := NewDesiredStateOfWorld()
     socketPath := ""
     err := dsw.AddOrUpdatePlugin(socketPath, false /* foundInDeprecatedDir */)
-    // Assert
-    if err == nil || err.Error() != "Socket path is empty" {
-        t.Fatalf("AddOrUpdatePlugin failed. Expected: <Socket path is empty> Actual: <%v>", err)
-    }
+    require.EqualError(t, err, "socket path is empty")

     // Get pluginsToRegister and check the newly added plugin is there
     dswPlugins := dsw.GetPluginsToRegister()
@ -94,7 +94,7 @@ func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions []
     // Verifies the grpcServer is ready to serve services.
     _, conn, err := dial(endpoint, time.Second)
     if err != nil {
-        return fmt.Errorf("Failed dialing endpoint (%s): %v", endpoint, err)
+        return fmt.Errorf("failed dialing endpoint (%s): %v", endpoint, err)
     }
     defer conn.Close()

@ -105,13 +105,13 @@ func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions []
     // Tests v1beta1 GetExampleInfo
     _, err = v1beta1Client.GetExampleInfo(context.Background(), &v1beta1.ExampleRequest{})
     if err != nil {
-        return fmt.Errorf("Failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
+        return fmt.Errorf("failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
     }

     // Tests v1beta1 GetExampleInfo
     _, err = v1beta2Client.GetExampleInfo(context.Background(), &v1beta2.ExampleRequest{})
     if err != nil {
-        return fmt.Errorf("Failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
+        return fmt.Errorf("failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
     }

     return nil
@ -138,7 +138,7 @@ func (e *examplePlugin) Serve(services ...string) error {
             v1beta2 := &pluginServiceV1Beta2{server: e}
             v1beta2.RegisterService()
         default:
-            return fmt.Errorf("Unsupported service: '%s'", service)
+            return fmt.Errorf("unsupported service: '%s'", service)
         }
     }

@ -169,7 +169,7 @@ func (e *examplePlugin) Stop() error {
     case <-c:
         break
     case <-time.After(time.Second):
-        return errors.New("Timed out on waiting for stop completion")
+        return errors.New("timed out on waiting for stop completion")
     }

     if err := os.Remove(e.endpoint); err != nil && !os.IsNotExist(err) {
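
Stop above relies on the usual select-with-deadline idiom: wait on a completion channel but give up after a fixed duration. A compact, runnable sketch of the same pattern:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitOrTimeout waits for done to close, or fails after d elapses.
func waitOrTimeout(done <-chan struct{}, d time.Duration) error {
	select {
	case <-done:
		return nil
	case <-time.After(d):
		return errors.New("timed out on waiting for stop completion")
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()
	fmt.Println(waitOrTimeout(done, time.Second)) // <nil>: the signal arrives in time
}
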
@ -87,7 +87,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
     case liveness:
         probeSpec = container.LivenessProbe
     default:
-        return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType)
+        return results.Failure, fmt.Errorf("unknown probe type: %q", probeType)
     }

     ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name)
@ -193,7 +193,7 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status
         return pb.tcp.Probe(host, port, timeout)
     }
     klog.Warningf("Failed to find probe builder for container: %v", container)
-    return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name)
+    return probe.Unknown, "", fmt.Errorf("missing probe handler for %s:%s", format.Pod(pod), container.Name)
 }

 func extractPort(param intstr.IntOrString, container v1.Container) (int, error) {
@ -210,7 +210,7 @@ func extractPort(param intstr.IntOrString, container v1.Container) (int, error)
         }
     }
     default:
-        return port, fmt.Errorf("IntOrString had no kind: %+v", param)
+        return port, fmt.Errorf("intOrString had no kind: %+v", param)
     }
     if port > 0 && port < 65536 {
         return port, nil
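
extractPort switches on the kind of an intstr.IntOrString, which holds either a literal port number or a named port. A trimmed sketch of that switch; resolvePort is hypothetical and skips the real lookup against the container's port list:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolvePort is a hypothetical, trimmed version of extractPort.
func resolvePort(param intstr.IntOrString) (int, error) {
	switch param.Type {
	case intstr.Int:
		return param.IntValue(), nil
	case intstr.String:
		// The kubelet resolves the name against container.Ports here.
		return 0, fmt.Errorf("named port %q not resolved in this sketch", param.StrVal)
	default:
		return 0, fmt.Errorf("intOrString had no kind: %+v", param)
	}
}

func main() {
	port, _ := resolvePort(intstr.FromInt(8080))
	fmt.Println(port) // 8080
}
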
@ -116,7 +116,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
     for {
         status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
         if err != nil {
-            return fmt.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err)
+            return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
         }

         if kl.isPodRunning(pod, status) {
@ -71,7 +71,7 @@ func (m *Manager) LookupRuntimeHandler(runtimeClassName *string) (string, error)
         if errors.IsNotFound(err) {
             return "", err
         }
-        return "", fmt.Errorf("Failed to lookup RuntimeClass %s: %v", name, err)
+        return "", fmt.Errorf("failed to lookup RuntimeClass %s: %v", name, err)
     }

     return rc.Handler, nil
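
LookupRuntimeHandler passes a NotFound error through unchanged so callers can still detect it, and wraps anything else with context. A small sketch of that split; lookup is hypothetical, and the helpers come from k8s.io/apimachinery:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// lookup is a hypothetical stand-in for the RuntimeClass lister call.
func lookup(name string, err error) (string, error) {
	if err != nil {
		// NotFound stays recognizable for callers using errors.IsNotFound.
		if apierrors.IsNotFound(err) {
			return "", err
		}
		return "", fmt.Errorf("failed to lookup RuntimeClass %s: %v", name, err)
	}
	return "handler", nil
}

func main() {
	notFound := apierrors.NewNotFound(schema.GroupResource{Group: "node.k8s.io", Resource: "runtimeclasses"}, "gvisor")
	_, err := lookup("gvisor", notFound)
	fmt.Println(apierrors.IsNotFound(err)) // true: still detectable upstream
}
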
@ -46,7 +46,7 @@ func handleHTTPStreams(req *http.Request, w http.ResponseWriter, portForwarder P
     upgrader := spdy.NewResponseUpgrader()
     conn := upgrader.UpgradeResponse(w, req, httpStreamReceived(streamChan))
     if conn == nil {
-        return errors.New("Unable to upgrade httpstream connection")
+        return errors.New("unable to upgrade httpstream connection")
     }
     defer conn.Close()

@ -115,7 +115,7 @@ func handleWebSocketStreams(req *http.Request, w http.ResponseWriter, portForwar
     conn.SetIdleTimeout(idleTimeout)
     _, streams, err := conn.Open(httplog.Unlogged(req, w), req)
     if err != nil {
-        err = fmt.Errorf("Unable to upgrade websocket connection: %v", err)
+        err = fmt.Errorf("unable to upgrade websocket connection: %v", err)
         return err
     }
     defer conn.Close()
@ -232,7 +232,7 @@ WaitForStreams:
             ctx.resizeStream = stream
             go waitStreamReply(stream.replySent, replyChan, stop)
         default:
-            runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType))
+            runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType))
         }
     case <-replyChan:
         receivedStreams++
@ -283,7 +283,7 @@ WaitForStreams:
             ctx.resizeStream = stream
             go waitStreamReply(stream.replySent, replyChan, stop)
         default:
-            runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType))
+            runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType))
         }
     case <-replyChan:
         receivedStreams++
@ -331,7 +331,7 @@ WaitForStreams:
             ctx.stderrStream = stream
             go waitStreamReply(stream.replySent, replyChan, stop)
         default:
-            runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType))
+            runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType))
         }
     case <-replyChan:
         receivedStreams++
@ -385,7 +385,7 @@ WaitForStreams:
             ctx.stderrStream = stream
             go waitStreamReply(stream.replySent, replyChan, stop)
         default:
-            runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType))
+            runtime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType))
         }
     case <-replyChan:
         receivedStreams++
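
All four WaitForStreams hunks report an unexpected stream type through runtime.HandleError instead of returning, so one malformed stream does not abort the negotiation loop. A reduced sketch of the pattern; handleStream and the accepted stream names are illustrative:

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func handleStream(streamType string) {
	switch streamType {
	case "stdin", "stdout", "stderr", "resize":
		fmt.Println("accepted stream:", streamType)
	default:
		// Logs via the registered error handlers and returns, keeping the loop alive.
		utilruntime.HandleError(fmt.Errorf("unexpected stream type: %q", streamType))
	}
}

func main() {
	handleStream("stdout")
	handleStream("bogus")
}
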
@ -97,7 +97,7 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Opti
     conn.SetIdleTimeout(idleTimeout)
     negotiatedProtocol, streams, err := conn.Open(httplog.Unlogged(req, w), req)
     if err != nil {
-        runtime.HandleError(fmt.Errorf("Unable to upgrade websocket connection: %v", err))
+        runtime.HandleError(fmt.Errorf("unable to upgrade websocket connection: %v", err))
         return nil, false
     }

@ -120,7 +120,7 @@ func parseEndpoint(endpoint string) (string, string, error) {
         return "unix", u.Path, nil

     case "":
-        return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
+        return "", "", fmt.Errorf("using %q as endpoint is deprecated, please consider using full url format", endpoint)

     default:
         return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
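
parseEndpoint leans on net/url: a unix:// endpoint yields scheme "unix" plus the socket path, while a bare path parses with an empty scheme and is rejected as deprecated. A runnable sketch of the same switch; it is trimmed, and the real function also handles tcp endpoints:

package main

import (
	"fmt"
	"net/url"
)

// parse is a trimmed, hypothetical version of parseEndpoint.
func parse(endpoint string) (string, string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", "", err
	}
	switch u.Scheme {
	case "unix":
		return "unix", u.Path, nil
	case "":
		return "", "", fmt.Errorf("using %q as endpoint is deprecated, please consider using full url format", endpoint)
	default:
		return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
	}
}

func main() {
	proto, path, _ := parse("unix:///var/run/remote.sock")
	fmt.Println(proto, path) // unix /var/run/remote.sock
}
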
@ -97,7 +97,7 @@ func NewInitializedVolumePluginMgr(

     if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil {
         return nil, fmt.Errorf(
-            "Could not initialize volume plugins for KubeletVolumePluginMgr: %v",
+            "could not initialize volume plugins for KubeletVolumePluginMgr: %v",
             err)
     }

@ -470,7 +470,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
     }
     // TODO: remove feature gate check after no longer needed
     if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock && mapperPlugin == nil {
-        return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
+        return nil, fmt.Errorf("could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
     }

     volumeSpec, err := rc.operationExecutor.ReconstructVolumeOperation(
@ -543,7 +543,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
     }
     // If mount or symlink doesn't exist, volume reconstruction should be failed
     if !isExist {
-        return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName)
+        return nil, fmt.Errorf("volume: %q is not mounted", uniqueVolumeName)
     }

     reconstructedVolume := &reconstructedVolume{