vendor: google.golang.org/grpc v1.57.1

server: prohibit more than MaxConcurrentStreams handlers from running at once
(CVE-2023-44487).

In addition to this change, applications should ensure they do not leave
tasks related to the RPC running in the background after returning from
method handlers, or should enforce appropriate limits on any such work.
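
For illustration, a minimal sketch of how a server would cap concurrent
streams with this option (the listener address and the elided service
registration are assumptions, not part of this change):

package main

import (
    "log"
    "net"

    "google.golang.org/grpc"
)

func main() {
    lis, err := net.Listen("tcp", "127.0.0.1:50051") // assumed address
    if err != nil {
        log.Fatal(err)
    }
    // With v1.57.1 the option is enforced server-side: at most 100 handler
    // goroutines run per transport, and further streams wait instead of
    // spawning unbounded handlers.
    srv := grpc.NewServer(grpc.MaxConcurrentStreams(100))
    // ...register services on srv here (elided)...
    log.Fatal(srv.Serve(lis))
}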

- https://github.com/grpc/grpc-go/compare/v1.57.0...v1.57.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go

@@ -115,12 +115,6 @@ type serviceInfo struct {
     mdata    interface{}
 }
 
-type serverWorkerData struct {
-    st     transport.ServerTransport
-    wg     *sync.WaitGroup
-    stream *transport.Stream
-}
-
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
     opts serverOptions
@@ -145,7 +139,7 @@ type Server struct {
     channelzID *channelz.Identifier
     czData     *channelzData
 
-    serverWorkerChannel chan *serverWorkerData
+    serverWorkerChannel chan func()
 }
 
 type serverOptions struct {
@@ -178,6 +172,7 @@ type serverOptions struct {
 }
 
 var defaultServerOptions = serverOptions{
+    maxConcurrentStreams:  math.MaxUint32,
     maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
     maxSendMessageSize:    defaultServerMaxSendMessageSize,
     connectionTimeout:     120 * time.Second,
@@ -389,6 +384,9 @@ func MaxSendMsgSize(m int) ServerOption {
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
+    if n == 0 {
+        n = math.MaxUint32
+    }
     return newFuncServerOption(func(o *serverOptions) {
         o.maxConcurrentStreams = n
     })
@@ -590,24 +588,19 @@ const serverWorkerResetThreshold = 1 << 16
 // [1] https://github.com/golang/go/issues/18138
 func (s *Server) serverWorker() {
     for completed := 0; completed < serverWorkerResetThreshold; completed++ {
-        data, ok := <-s.serverWorkerChannel
+        f, ok := <-s.serverWorkerChannel
         if !ok {
             return
         }
-        s.handleSingleStream(data)
+        f()
     }
     go s.serverWorker()
 }
 
-func (s *Server) handleSingleStream(data *serverWorkerData) {
-    defer data.wg.Done()
-    s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
-}
-
 // initServerWorkers creates worker goroutines and a channel to process incoming
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
-    s.serverWorkerChannel = make(chan *serverWorkerData)
+    s.serverWorkerChannel = make(chan func())
     for i := uint32(0); i < s.opts.numServerWorkers; i++ {
         go s.serverWorker()
     }
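
Aside: a standalone, runnable sketch of the worker-reset pattern above (the
helper names here are hypothetical, not part of the diff). Each worker
replaces itself with a fresh goroutine after a fixed number of tasks, so any
stack growth caused by deep handler call chains is released back to the
runtime:

package main

import "fmt"

// serverWorkerResetThreshold mirrors the constant in the hunk above.
const serverWorkerResetThreshold = 1 << 16

// worker runs queued tasks, then replaces itself with a fresh goroutine so
// accumulated stack growth is discarded (see golang/go#18138).
func worker(tasks <-chan func()) {
    for completed := 0; completed < serverWorkerResetThreshold; completed++ {
        f, ok := <-tasks
        if !ok {
            return
        }
        f()
    }
    go worker(tasks)
}

func main() {
    tasks := make(chan func())
    done := make(chan struct{})
    go worker(tasks)
    tasks <- func() { fmt.Println("ran on a pooled worker"); close(done) }
    <-done
}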
@@ -966,21 +959,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
     defer st.Close(errors.New("finished serving streams for the server transport"))
     var wg sync.WaitGroup
 
+    streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
     st.HandleStreams(func(stream *transport.Stream) {
         wg.Add(1)
+
+        streamQuota.acquire()
+        f := func() {
+            defer streamQuota.release()
+            defer wg.Done()
+            s.handleStream(st, stream, s.traceInfo(st, stream))
+        }
+
         if s.opts.numServerWorkers > 0 {
-            data := &serverWorkerData{st: st, wg: &wg, stream: stream}
             select {
-            case s.serverWorkerChannel <- data:
+            case s.serverWorkerChannel <- f:
                 return
             default:
                 // If all stream workers are busy, fallback to the default code path.
             }
         }
-        go func() {
-            defer wg.Done()
-            s.handleStream(st, stream, s.traceInfo(st, stream))
-        }()
+        go f()
     }, func(ctx context.Context, method string) context.Context {
         if !EnableTracing {
             return ctx
@@ -2075,3 +2073,32 @@ func validateSendCompressor(name, clientCompressors string) error {
     }
     return fmt.Errorf("client does not support compressor %q", name)
 }
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+    n    int64 // accessed atomically
+    wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+    if atomic.AddInt64(&q.n, -1) < 0 {
+        // We ran out of quota.  Block until a release happens.
+        <-q.wait
+    }
+}
+
+func (q *atomicSemaphore) release() {
+    // N.B. the "<= 0" check below should allow for this to work with multiple
+    // concurrent calls to acquire, but also note that with synchronous calls to
+    // acquire, as our system does, n will never be less than -1.  There are
+    // fairness issues (queuing) to consider if this was to be generalized.
+    if atomic.AddInt64(&q.n, 1) <= 0 {
+        // An acquire was waiting on us.  Unblock it.
+        q.wait <- struct{}{}
+    }
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+    return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
+}
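
To make the semantics concrete, a small standalone demo (the semaphore is
reproduced from the hunk above so the sketch compiles on its own; the handler
loop is hypothetical): acquire is called synchronously from a single dispatch
loop, as in serveStreams, while release happens asynchronously from the
finishing handlers, so at most two run at once:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

// atomicSemaphore and newHandlerQuota reproduced from the diff above so this
// sketch compiles standalone.
type atomicSemaphore struct {
    n    int64 // accessed atomically
    wait chan struct{}
}

func (q *atomicSemaphore) acquire() {
    if atomic.AddInt64(&q.n, -1) < 0 {
        <-q.wait // out of quota: block until a release hands over a slot
    }
}

func (q *atomicSemaphore) release() {
    if atomic.AddInt64(&q.n, 1) <= 0 {
        q.wait <- struct{}{} // an acquire is waiting: unblock it
    }
}

func newHandlerQuota(n uint32) *atomicSemaphore {
    return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
}

func main() {
    quota := newHandlerQuota(2) // admit at most two "handlers" at a time
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        quota.acquire() // synchronous, as in serveStreams above
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            defer quota.release() // asynchronous, from the handler goroutine
            fmt.Printf("handler %d running\n", i)
            time.Sleep(10 * time.Millisecond)
        }(i)
    }
    wg.Wait()
}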