Push the full channel logic into the implementation of the broadcaster
in watch/mux.go rather than being in the client event recording code.
Alex Robinson
2015-01-13 01:40:17 +00:00
parent 702a6f96b4
commit be6b1cf0e2
5 changed files with 64 additions and 59 deletions

pkg/watch/mux.go

@@ -22,6 +22,15 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
 )
 
+// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
+// channel is full.
+type FullChannelBehavior int
+
+const (
+	WaitIfChannelFull = iota
+	DropIfChannelFull = iota
+)
+
 // Broadcaster distributes event notifications among any number of watchers. Every event
 // is delivered to every watcher.
 type Broadcaster struct {
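Background, not part of the diff: the DropIfChannelFull mode introduced above relies on Go's non-blocking send idiom, a select with a default case that runs immediately when the channel cannot accept a value. A minimal standalone sketch:

package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	// The buffer has room, so this send succeeds.
	select {
	case ch <- 1:
		fmt.Println("queued")
	default:
		fmt.Println("dropped")
	}

	// The buffer is now full; default fires instead of blocking.
	select {
	case ch <- 2:
		fmt.Println("queued")
	default:
		fmt.Println("dropped")
	}
}

This prints "queued" then "dropped": the sender never stalls on a full channel, which is exactly the property the drop policy below needs.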
@@ -31,17 +40,27 @@ type Broadcaster struct {
 	nextWatcher int64
 
 	incoming chan Event
+
+	// How large to make watcher's channel.
+	watchQueueLength int
+	// If one of the watch channels is full, don't wait for it to become empty.
+	// Instead just deliver it to the watchers that do have space in their
+	// channels and move on to the next event.
+	// It's more fair to do this on a per-watcher basis than to do it on the
+	// "incoming" channel, which would allow one slow watcher to prevent all
+	// other watchers from getting new events.
+	fullChannelBehavior FullChannelBehavior
 }
 
-// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue.
-// When queueLength is 0, Action will block until any prior event has been
-// completely distributed. It is guaranteed that events will be distributed in the
-// order in which they occur, but the order in which a single event is distributed
-// among all of the watchers is unspecified.
-func NewBroadcaster(queueLength int) *Broadcaster {
+// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
+// It is guaranteed that events will be distributed in the order in which they occur,
+// but the order in which a single event is distributed among all of the watchers is unspecified.
+func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
 	m := &Broadcaster{
-		watchers: map[int64]*broadcasterWatcher{},
-		incoming: make(chan Event, queueLength),
+		watchers:            map[int64]*broadcasterWatcher{},
+		incoming:            make(chan Event),
+		watchQueueLength:    queueLength,
+		fullChannelBehavior: fullChannelBehavior,
 	}
 	go m.loop()
 	return m
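With the new signature, callers pick the per-watcher queue size and the full-channel policy together at construction time. A hypothetical caller sketch (assuming the pkg/watch import path used elsewhere in this repository; only NewBroadcaster, Watch, and Stop from this diff are relied on):

package main

import "github.com/GoogleCloudPlatform/kubernetes/pkg/watch"

func main() {
	// Queue size 0: distribute blocks until every watcher has accepted
	// each event, matching the old behavior.
	blocking := watch.NewBroadcaster(0, watch.WaitIfChannelFull)
	w := blocking.Watch()
	defer w.Stop()

	// Queue size 25: a watcher that falls more than 25 events behind
	// misses events rather than stalling everyone else.
	lossy := watch.NewBroadcaster(25, watch.DropIfChannelFull)
	w2 := lossy.Watch()
	defer w2.Stop()
}

Note that incoming is now unbuffered: the queueing moves entirely to the per-watcher channels, which is what makes the fairness argument in the new field comment work.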
@@ -56,7 +75,7 @@ func (m *Broadcaster) Watch() Interface {
 	id := m.nextWatcher
 	m.nextWatcher++
 	w := &broadcasterWatcher{
-		result:  make(chan Event),
+		result:  make(chan Event, m.watchQueueLength),
 		stopped: make(chan struct{}),
 		id:      id,
 		m:       m,
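Each watcher's result channel now has capacity watchQueueLength, so a watcher can lag up to queueLength events before the full-channel policy matters at all. A tiny standalone illustration (toy code, not from the commit) of what channel capacity buys:

package main

import "fmt"

func main() {
	// With capacity 3, a sender can queue three values even though no
	// receiver is running; only a fourth send would block or be dropped.
	result := make(chan string, 3)
	result <- "a"
	result <- "b"
	result <- "c"
	fmt.Println(len(result), cap(result)) // prints: 3 3
}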
@@ -119,10 +138,20 @@ func (m *Broadcaster) loop() {
 func (m *Broadcaster) distribute(event Event) {
 	m.lock.Lock()
 	defer m.lock.Unlock()
-	for _, w := range m.watchers {
-		select {
-		case w.result <- event:
-		case <-w.stopped:
+	if m.fullChannelBehavior == DropIfChannelFull {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			default: // Don't block if the event can't be queued.
+			}
+		}
+	} else {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			}
 		}
 	}
 }
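The two loops above differ only in the default case. To make the consequence concrete, here is a self-contained toy model of distribute (plain int channels; the stopped case and locking are elided), not the package's actual code:

package main

import "fmt"

type behavior int

const (
	waitIfFull behavior = iota
	dropIfFull
)

// distribute mimics Broadcaster.distribute for one event: with dropIfFull a
// full watcher channel is skipped; with waitIfFull the send would block until
// the watcher drains its channel.
func distribute(watchers []chan int, event int, b behavior) {
	for _, w := range watchers {
		if b == dropIfFull {
			select {
			case w <- event:
			default: // full: drop the event for this watcher only
			}
		} else {
			w <- event
		}
	}
}

func main() {
	fast := make(chan int, 2)
	slow := make(chan int, 1)
	watchers := []chan int{fast, slow}

	distribute(watchers, 1, dropIfFull)
	distribute(watchers, 2, dropIfFull) // slow is full: event 2 dropped for it

	fmt.Println("fast queued:", len(fast)) // fast queued: 2
	fmt.Println("slow queued:", len(slow)) // slow queued: 1
}

Had this used waitIfFull, the second distribute call would block forever on slow with no receiver running, which is exactly the failure mode the per-watcher drop policy avoids.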

pkg/watch/mux_test.go

@@ -39,7 +39,7 @@ func TestBroadcaster(t *testing.T) {
 	}
 
 	// The broadcaster we're testing
-	m := NewBroadcaster(0)
+	m := NewBroadcaster(0, WaitIfChannelFull)
 
 	// Add a bunch of watchers
 	const testWatchers = 2
@@ -77,7 +77,7 @@ func TestBroadcaster(t *testing.T) {
 }
 
 func TestBroadcasterWatcherClose(t *testing.T) {
-	m := NewBroadcaster(0)
+	m := NewBroadcaster(0, WaitIfChannelFull)
 	w := m.Watch()
 	w2 := m.Watch()
 	w.Stop()
@@ -95,7 +95,7 @@ func TestBroadcasterWatcherClose(t *testing.T) {
 
 func TestBroadcasterWatcherStopDeadlock(t *testing.T) {
 	done := make(chan bool)
-	m := NewBroadcaster(0)
+	m := NewBroadcaster(0, WaitIfChannelFull)
 	go func(w0, w1 Interface) {
 		// We know Broadcaster is in the distribute loop once one watcher receives
 		// an event. Stop the other watcher while distribute is trying to
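The test hunks shown here only adapt existing constructor calls to the new signature; none of them exercise the new drop path. A sketch of what such a test could look like (hypothetical, and not claimed to match whatever else this commit adds; it assumes the package's Action method mentioned in the removed doc comment, the Added event type, a runtime.Object test fixture eventObj, and a "time" import):

// Sketch only, not part of this diff. eventObj stands in for an existing
// runtime.Object test fixture from this test file.
func TestDropIfChannelFullSketch(t *testing.T) {
	m := NewBroadcaster(1, DropIfChannelFull)

	// A watcher that never reads: its single-slot buffer fills after the
	// first event. Under WaitIfChannelFull this would wedge distribute;
	// under DropIfChannelFull later events are dropped for it instead.
	stuck := m.Watch()
	defer stuck.Stop()

	done := make(chan struct{})
	go func() {
		m.Action(Added, eventObj) // fills stuck's buffer
		m.Action(Added, eventObj) // dropped for stuck rather than blocking
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("distribute blocked on a full watcher channel")
	}
}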