Remove redundant string conversions in print statements
Signed-off-by: Daniel Hu <farmer.hutao@outlook.com>
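The functions touched below take podUID as a plain string (the hunk headers show the parameter lists), so string(podUID) converts a string to a string: a no-op that only adds noise. The conversion would matter only if the value were a distinct type such as types.UID. A minimal standalone sketch of the pattern being removed, not the kubelet code itself (logAllocation and the sample values are illustrative); linters such as unconvert flag exactly this kind of redundant conversion:

package main

import "fmt"

// logAllocation mirrors the shape of the calls in the diff: podUID is
// already declared string, so string(podUID) below is a no-op conversion.
func logAllocation(podUID, contName, resource string) {
	// Before: the conversion compiles but changes nothing.
	fmt.Printf("pod %q container %q resource %q\n", string(podUID), contName, resource)
	// After: identical output without the wrapper.
	fmt.Printf("pod %q container %q resource %q\n", podUID, contName, resource)
}

func main() {
	logAllocation("0d5a8e3e-example-uid", "gpu-container", "example.com/gpu")
}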
@@ -560,12 +560,12 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// This can happen if a container restarts for example.
 	devices := m.podDevices.containerDevices(podUID, contName, resource)
 	if devices != nil {
-		klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", sets.List(devices))
+		klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", podUID, "devices", sets.List(devices))
 		needed = needed - devices.Len()
 		// A pod's resource is not expected to change once admitted by the API server,
 		// so just fail loudly here. We can revisit this part if this no longer holds.
 		if needed != 0 {
-			return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", string(podUID), contName, resource, devices.Len(), required)
+			return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", podUID, contName, resource, devices.Len(), required)
 		}
 	}
 
@@ -580,12 +580,12 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
 	// because it already has access to all the required devices, so we got nothing to do and we can bail out.
 	if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
-		klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+		klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
 		return nil, nil
 	}
 
 	// We dealt with scenario 2. If we got this far it's either scenario 3 (node reboot) or scenario 1 (steady state, normal flow).
-	klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+	klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
 	healthyDevices, hasRegistered := m.healthyDevices[resource]
 
 	// The following checks are expected to fail only happen on scenario 3 (node reboot).
@@ -611,7 +611,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path.
 	// We cover container restart on kubelet steady state with the same flow.
 	if needed == 0 {
-		klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName)
+		klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
 		// No change, no work.
 		return nil, nil
 	}
@@ -973,12 +973,12 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
 	devices := m.podDevices.containerDevices(podUID, contName, resource)
 	if devices == nil {
 		m.mutex.Unlock()
-		return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", string(podUID), contName, resource)
+		return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource)
 	}
 
 	m.mutex.Unlock()
 	devs := devices.UnsortedList()
-	klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", string(podUID))
+	klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", podUID)
 	_, err := eI.e.preStartContainer(devs)
 	if err != nil {
 		return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err)
@@ -1001,7 +1001,7 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re
 	}
 
 	m.mutex.Unlock()
-	klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "containerName", contName, "podUID", string(podUID))
+	klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "containerName", contName, "podUID", podUID)
 	resp, err := eI.e.getPreferredAllocation(available.UnsortedList(), mustInclude.UnsortedList(), size)
 	m.mutex.Lock()
 	if err != nil {
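For reference, a standalone sketch of the structured klog calls edited above, assuming only the k8s.io/klog/v2 module this code already uses: InfoS takes a message followed by alternating key/value pairs and renders a string value identically with or without the redundant conversion, so the change is purely cosmetic (the podUID value below is illustrative).

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Enable verbosity 3 so the V(3) lines below are emitted.
	klog.InitFlags(nil)
	_ = flag.Set("v", "3")
	flag.Parse()
	defer klog.Flush()

	podUID := "0d5a8e3e-example-uid"
	// Both calls log the same key/value output; string(podUID) is a no-op
	// because podUID is already a string.
	klog.V(3).InfoS("Need devices to allocate for pod", "podUID", string(podUID))
	klog.V(3).InfoS("Need devices to allocate for pod", "podUID", podUID)
}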