[kubernetes/scheduler] use lockless diagnosis collection in findNodesThatPassFilters
@@ -595,10 +595,15 @@ func (sched *Scheduler) findNodesThatPassFilters(
 	}
 
 	errCh := parallelize.NewErrorChannel()
-	var statusesLock sync.Mutex
 	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
+
+	type nodeStatus struct {
+		node   string
+		status *framework.Status
+	}
+	result := make([]*nodeStatus, len(feasibleNodes))
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
 		// this is to make sure all nodes have the same chance of being examined across pods.
@@ -617,12 +622,16 @@ func (sched *Scheduler) findNodesThatPassFilters(
 				feasibleNodes[length-1] = nodeInfo
 			}
 		} else {
-			statusesLock.Lock()
-			diagnosis.NodeToStatusMap[nodeInfo.Node().Name] = status
-			diagnosis.AddPluginStatus(status)
-			statusesLock.Unlock()
+			result[i] = &nodeStatus{node: nodeInfo.Node().Name, status: status}
 		}
 	}
+	for _, item := range result {
+		if item == nil {
+			continue
+		}
+		diagnosis.NodeToStatusMap[item.node] = item.status
+		diagnosis.AddPluginStatus(item.status)
+	}
 
 	beginCheckNode := time.Now()
 	statusCode := framework.Success
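The change replaces the mutex-serialized writes to diagnosis.NodeToStatusMap with a result slice that has one slot per node: each parallel filter worker writes only result[i] for its own index, and the collected statuses are folded into the map afterwards from a single goroutine, so no lock is needed on the hot path. Below is a minimal, self-contained sketch of the same pattern using only the Go standard library; the names (nodeResult, filterNode, nodeToStatus) and the rejection rule are illustrative placeholders, not scheduler APIs.

package main

import (
	"fmt"
	"sync"
)

// nodeResult mirrors the role of the nodeStatus struct in the patch: it
// records which node was rejected and why.
type nodeResult struct {
	node   string
	status string
}

// filterNode stands in for the per-node filter check; it returns a non-empty
// status when the node is rejected. The even-length rule is only a placeholder.
func filterNode(node string) string {
	if len(node)%2 == 0 {
		return "rejected: node name has even length"
	}
	return ""
}

func main() {
	nodes := []string{"node-a", "node-bb", "node-ccc", "node-dddd"}

	// One slot per node. Each goroutine writes only result[i], so the writes
	// never overlap and no mutex is needed during the parallel phase.
	result := make([]*nodeResult, len(nodes))

	var wg sync.WaitGroup
	for i := range nodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if status := filterNode(nodes[i]); status != "" {
				result[i] = &nodeResult{node: nodes[i], status: status}
			}
		}(i)
	}
	wg.Wait()

	// Sequential merge: the shared map is touched only after every worker has
	// finished, from a single goroutine, so it needs no synchronization either.
	nodeToStatus := make(map[string]string)
	for _, item := range result {
		if item == nil {
			continue // node passed the filters; nothing to record
		}
		nodeToStatus[item.node] = item.status
	}
	fmt.Println(nodeToStatus)
}

The trade-off is one pointer slot per node in exchange for removing lock contention from the parallel filter loop; the merge stays race-free only because it runs strictly after the parallel phase completes (after wg.Wait() in the sketch, and after checkNode has been run for every node in the patched code).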