Merge pull request #61894 from atlassian/misc-cleanups
Automatic merge from submit-queue (batch tested with PRs 61894, 61369). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Use range in loops; misc fixes

**What this PR does / why we need it**: It is cleaner to use `range` in a `for` loop to iterate over a channel until it is closed.

**Release note**:

```release-note
NONE
```

/kind cleanup
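For context, here is a minimal, self-contained sketch of the pattern applied throughout the diff below; the `events` channel and `handle` function are invented for illustration and are not taken from the Kubernetes code:

```go
package main

import "fmt"

func handle(s string) { fmt.Println("got:", s) }

func main() {
	events := make(chan string, 2)
	events <- "a"
	events <- "b"
	close(events)

	// Before: explicit receive plus a closed-channel check.
	// for {
	// 	e, ok := <-events
	// 	if !ok {
	// 		break
	// 	}
	// 	handle(e)
	// }

	// After: range exits automatically once the channel is closed and
	// drained, which is what this PR switches the loops to.
	for e := range events {
		handle(e)
	}
}
```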
commit 7ce753aa73
@@ -32,11 +32,7 @@ func HandleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(si
 	go func() {
 		defer runtime.HandleCrash()
 
-		for {
-			size, ok := <-resize
-			if !ok {
-				return
-			}
+		for size := range resize {
 			if size.Height < 1 || size.Width < 1 {
 				continue
 			}
@@ -1097,35 +1097,28 @@ func (kl *Kubelet) podKiller() {
 	killing := sets.NewString()
 	// guard for the killing set
 	lock := sync.Mutex{}
-	for {
-		select {
-		case podPair, ok := <-kl.podKillingCh:
-			if !ok {
-				return
-			}
-
-			runningPod := podPair.RunningPod
-			apiPod := podPair.APIPod
-
-			lock.Lock()
-			exists := killing.Has(string(runningPod.ID))
-			if !exists {
-				killing.Insert(string(runningPod.ID))
-			}
-			lock.Unlock()
-
-			if !exists {
-				go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
-					glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
-					err := kl.killPod(apiPod, runningPod, nil, nil)
-					if err != nil {
-						glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
-					}
-					lock.Lock()
-					killing.Delete(string(runningPod.ID))
-					lock.Unlock()
-				}(apiPod, runningPod)
-			}
-		}
+	for podPair := range kl.podKillingCh {
+		runningPod := podPair.RunningPod
+		apiPod := podPair.APIPod
+
+		lock.Lock()
+		exists := killing.Has(string(runningPod.ID))
+		if !exists {
+			killing.Insert(string(runningPod.ID))
+		}
+		lock.Unlock()
+
+		if !exists {
+			go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
+				glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
+				err := kl.killPod(apiPod, runningPod, nil, nil)
+				if err != nil {
+					glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
+				}
+				lock.Lock()
+				killing.Delete(string(runningPod.ID))
+				lock.Unlock()
+			}(apiPod, runningPod)
+		}
 	}
 }
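The podKiller change keeps the same dedup logic (a mutex-guarded set of in-flight pod IDs, one goroutine per new pod) and only swaps the explicit receive for `range`. A rough, self-contained sketch of that shape, with invented names (`workCh`, `inFlight`, `process` stand-ins, not the kubelet's real API):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	workCh := make(chan string)
	inFlight := map[string]bool{} // stand-in for sets.String
	var mu sync.Mutex

	go func() {
		// Range exits once workCh is closed, replacing the select/ok check.
		for id := range workCh {
			mu.Lock()
			exists := inFlight[id]
			if !exists {
				inFlight[id] = true
			}
			mu.Unlock()

			// Only start work for ids that are not already being processed.
			if !exists {
				go func(id string) {
					fmt.Println("processing", id)
					time.Sleep(10 * time.Millisecond) // pretend work
					mu.Lock()
					delete(inFlight, id)
					mu.Unlock()
				}(id)
			}
		}
	}()

	workCh <- "pod-a"
	workCh <- "pod-a" // duplicate: skipped if pod-a is still in flight
	workCh <- "pod-b"
	close(workCh)
	time.Sleep(50 * time.Millisecond) // crude wait, demo only
}
```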
@@ -45,10 +45,8 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
 	buffer := make(chan kubecontainer.ContainerID, containerDeletorBufferLimit)
 	go wait.Until(func() {
 		for {
-			select {
-			case id := <-buffer:
-				runtime.DeleteContainer(id)
-			}
+			id := <-buffer
+			runtime.DeleteContainer(id)
 		}
 	}, 0, wait.NeverStop)
 
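This hunk relies on the fact that a `select` with a single receive case and no `default` blocks exactly like a plain receive, so the wrapper adds nothing. A tiny sketch with made-up names:

```go
package main

import "fmt"

func main() {
	ids := make(chan int, 1)
	ids <- 42

	// A one-case select without default blocks and receives exactly
	// like a plain receive expression.
	select {
	case id := <-ids:
		fmt.Println("via select:", id)
	}

	ids <- 7
	id := <-ids // plain receive, same blocking behaviour
	fmt.Println("via receive:", id)
}
```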
@@ -226,7 +226,7 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
 	var eventWatchChannelClosed bool
 	for {
 		select {
-		case _ = <-stopChannel:
+		case <-stopChannel:
 			return
 
 		case podEvent, ok := <-podWatch.ResultChan():
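Here only the redundant blank assignment is dropped: when the received value is unused, `case <-ch:` is the idiomatic form. A minimal sketch with a hypothetical stop channel:

```go
package main

import "fmt"

func main() {
	stop := make(chan struct{})
	done := make(chan struct{})

	go func() {
		select {
		// Equivalent to `case _ = <-stop:`; the value is simply discarded.
		case <-stop:
			fmt.Println("stopping")
		}
		close(done)
	}()

	close(stop)
	<-done
}
```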
@@ -62,11 +62,7 @@ func (fw *filteredWatch) Stop() {
 // loop waits for new values, filters them, and resends them.
 func (fw *filteredWatch) loop() {
 	defer close(fw.result)
-	for {
-		event, ok := <-fw.incoming.ResultChan()
-		if !ok {
-			break
-		}
+	for event := range fw.incoming.ResultChan() {
 		filtered, keep := fw.f(event)
 		if keep {
 			fw.result <- filtered
@@ -204,11 +204,7 @@ func (m *Broadcaster) Shutdown() {
 func (m *Broadcaster) loop() {
 	// Deliberately not catching crashes here. Yes, bring down the process if there's a
 	// bug in watch.Broadcaster.
-	for {
-		event, ok := <-m.incoming
-		if !ok {
-			break
-		}
+	for event := range m.incoming {
 		if event.Type == internalRunFunctionMarker {
 			event.Object.(functionFakeRuntimeObject)()
 			continue
@@ -445,6 +445,7 @@ func (e *Store) WaitForInitialized(ctx genericapirequest.Context, obj runtime.Ob
 				}
 			}
 		case <-ctx.Done():
+			return nil, ctx.Err()
 		}
 	}
 }
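The added `return nil, ctx.Err()` is one of the "misc fixes": `ctx.Done()` stays closed once the context is cancelled, so an empty case would let the surrounding for/select spin. A small sketch of the fixed shape, using a hypothetical `waitForValue` helper rather than the real store code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForValue is a hypothetical stand-in for a watch loop: it returns
// the first value from ch, or the context error once ctx is cancelled.
func waitForValue(ctx context.Context, ch <-chan int) (int, error) {
	for {
		select {
		case v := <-ch:
			return v, nil
		case <-ctx.Done():
			// Without this return the loop would spin: Done() stays
			// closed after cancellation, so select keeps firing.
			return 0, ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	_, err := waitForValue(ctx, make(chan int))
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```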
@@ -1070,11 +1071,7 @@ func (e *Store) DeleteCollection(ctx genericapirequest.Context, options *metav1.
 		})
 		defer wg.Done()
 
-		for {
-			index, ok := <-toProcess
-			if !ok {
-				return
-			}
+		for index := range toProcess {
 			accessor, err := meta.Accessor(items[index])
 			if err != nil {
 				errs <- err
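For context, this loop runs inside worker goroutines that drain an index channel; roughly the following shape, with invented names rather than the real `Store` internals:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c", "d"}
	toProcess := make(chan int, len(items))
	for i := range items {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	for w := 0; w < 2; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Ranging stops once toProcess is closed and drained,
			// replacing the manual `index, ok := <-toProcess` check.
			for index := range toProcess {
				fmt.Println("processing", items[index])
			}
		}()
	}
	wg.Wait()
}
```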
@@ -225,11 +225,7 @@ func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler fun
 	watcher := eventBroadcaster.Watch()
 	go func() {
 		defer utilruntime.HandleCrash()
-		for {
-			watchEvent, open := <-watcher.ResultChan()
-			if !open {
-				return
-			}
+		for watchEvent := range watcher.ResultChan() {
 			event, ok := watchEvent.Object.(*v1.Event)
 			if !ok {
 				// This is all local, so there's no reason this should
@@ -192,6 +192,7 @@ func expectNoEvent(w watch.Interface, eventType watch.EventType, object runtime.
 
 func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject runtime.Object) (watch.Event, bool) {
 	stopTimer := time.NewTimer(1 * time.Minute)
+	defer stopTimer.Stop()
 	for {
 		select {
 		case actual := <-w.ResultChan():
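The added `defer stopTimer.Stop()` releases the timer on every return path instead of leaving it running after an early successful receive. A small sketch with a made-up `waitForValue` helper mirroring the test's shape:

```go
package main

import (
	"fmt"
	"time"
)

// waitForValue waits for a value on ch, or gives up after timeout.
func waitForValue(ch <-chan string, timeout time.Duration) (string, bool) {
	stopTimer := time.NewTimer(timeout)
	// Stop the timer on every return path so it does not keep running
	// after an early successful receive.
	defer stopTimer.Stop()
	for {
		select {
		case v := <-ch:
			return v, true
		case <-stopTimer.C:
			return "", false
		}
	}
}

func main() {
	ch := make(chan string, 1)
	ch <- "event"
	v, ok := waitForValue(ch, time.Minute)
	fmt.Println(v, ok) // event true
}
```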