working-config-otel
Signed-off-by: Davanum Srinivas <davanum@gmail.com>
vendor/google.golang.org/grpc/server.go (generated, vendored) | 162
@@ -86,7 +86,7 @@ func init() {
 var statusOK = status.New(codes.OK, "")
 var logger = grpclog.Component("core")
 
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
 
 // MethodDesc represents an RPC service's method specification.
 type MethodDesc struct {
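
Note: the interface{} -> any rewrites that recur throughout this diff are purely cosmetic. Since Go 1.18, any is a predeclared alias for interface{}, so the two spellings denote the same type; a minimal standalone check (not part of the vendored file):

    package main

    import "fmt"

    func main() {
        var a any = 42
        var b interface{} = a // identical types, assignable both ways
        fmt.Println(a, b)
    }
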
@@ -99,20 +99,26 @@ type ServiceDesc struct {
 	ServiceName string
 	// The pointer to the service interface. Used to check whether the user
 	// provided implementation satisfies the interface requirements.
-	HandlerType interface{}
+	HandlerType any
 	Methods []MethodDesc
 	Streams []StreamDesc
-	Metadata interface{}
+	Metadata any
 }
 
 // serviceInfo wraps information about a service. It is very similar to
 // ServiceDesc and is constructed from it for internal purposes.
 type serviceInfo struct {
 	// Contains the implementation for the methods in this service.
-	serviceImpl interface{}
+	serviceImpl any
 	methods map[string]*MethodDesc
 	streams map[string]*StreamDesc
-	mdata interface{}
+	mdata any
 }
 
+type serverWorkerData struct {
+	st transport.ServerTransport
+	wg *sync.WaitGroup
+	stream *transport.Stream
+}
+
 // Server is a gRPC server to serve RPC requests.
@@ -139,7 +145,7 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData *channelzData
 
-	serverWorkerChannel chan func()
+	serverWorkerChannel chan *serverWorkerData
 }
 
 type serverOptions struct {
@@ -164,19 +170,21 @@ type serverOptions struct {
 	initialConnWindowSize int32
 	writeBufferSize int
 	readBufferSize int
+	sharedWriteBuffer bool
 	connectionTimeout time.Duration
 	maxHeaderListSize *uint32
 	headerTableSize *uint32
 	numServerWorkers uint32
+	recvBufferPool SharedBufferPool
 }
 
 var defaultServerOptions = serverOptions{
-	maxConcurrentStreams: math.MaxUint32,
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize: defaultServerMaxSendMessageSize,
 	connectionTimeout: 120 * time.Second,
 	writeBufferSize: defaultWriteBufSize,
 	readBufferSize: defaultReadBufSize,
+	recvBufferPool: nopBufferPool{},
 }
 var globalServerOptions []ServerOption
@@ -228,6 +236,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
 	return &joinServerOption{opts: opts}
 }
 
+// SharedWriteBuffer allows reusing per-connection transport write buffer.
+// If this option is set to true every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SharedWriteBuffer(val bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.sharedWriteBuffer = val
+	})
+}
+
 // WriteBufferSize determines how much data can be batched before doing a write
 // on the wire. The corresponding memory allocation for this buffer will be
 // twice the size to keep syscalls low. The default value for this buffer is
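
The SharedWriteBuffer option added above can be enabled from application code roughly as follows; this is a sketch assuming the vendored google.golang.org/grpc version shown here, with an illustrative listen address:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatal(err)
        }
        // Experimental: let each connection release its write buffer after flushing.
        s := grpc.NewServer(grpc.SharedWriteBuffer(true))
        log.Fatal(s.Serve(lis))
    }
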
@@ -268,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption {
 
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
-	if kp.Time > 0 && kp.Time < time.Second {
+	if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
 		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
-		kp.Time = time.Second
+		kp.Time = internal.KeepaliveMinServerPingTime
 	}
 
 	return newFuncServerOption(func(o *serverOptions) {
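
The clamping above means a keepalive interval below the server minimum is raised to one second. A hedged sketch of a configuration that would trigger it (keepalive.ServerParameters is the real grpc package type; the values are illustrative):

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        // 500ms is below the minimum ping interval, so the server logs
        // "Adjusting keepalive ping interval to minimum period of 1s" and uses 1s.
        s := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
            Time:    500 * time.Millisecond,
            Timeout: 20 * time.Second,
        }))
        defer s.Stop()
    }
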
@@ -382,9 +404,6 @@ func MaxSendMsgSize(m int) ServerOption {
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
-	if n == 0 {
-		n = math.MaxUint32
-	}
 	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
 	})
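
Callers use the option the same way in both versions; only the zero-value normalization inside MaxConcurrentStreams itself is dropped here. A minimal sketch:

    package main

    import "google.golang.org/grpc"

    func main() {
        // Cap each client connection at 256 concurrent streams.
        s := grpc.NewServer(grpc.MaxConcurrentStreams(256))
        defer s.Stop()
    }
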
@@ -550,6 +569,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 	})
 }
 
+// RecvBufferPool returns a ServerOption that configures the server
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: StatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
+
 // serverWorkerResetThreshold defines how often the stack must be reset. Every
 // N requests, by spawning a new goroutine in its place, a worker can reset its
 // stack so that large stacks don't live in memory forever. 2^16 should allow
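
A sketch of opting in to the pool using the grpc.NewSharedBufferPool constructor named in the comment above; per that note, the pool is ignored when StatsHandler, EnableTracing, or binary logging is in use:

    package main

    import "google.golang.org/grpc"

    func main() {
        // Experimental: reuse receive buffers across RPCs to reduce per-message allocations.
        s := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
        defer s.Stop()
    }
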
@@ -565,19 +605,24 @@ const serverWorkerResetThreshold = 1 << 16
 // [1] https://github.com/golang/go/issues/18138
 func (s *Server) serverWorker() {
 	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
-		f, ok := <-s.serverWorkerChannel
+		data, ok := <-s.serverWorkerChannel
 		if !ok {
 			return
 		}
-		f()
+		s.handleSingleStream(data)
 	}
 	go s.serverWorker()
 }
 
+func (s *Server) handleSingleStream(data *serverWorkerData) {
+	defer data.wg.Done()
+	s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+}
+
 // initServerWorkers creates worker goroutines and a channel to process incoming
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
-	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannel = make(chan *serverWorkerData)
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
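
This worker pool only comes into play when NumStreamWorkers is set to a non-zero value, and each worker replaces itself with a fresh goroutine after serverWorkerResetThreshold (1 << 16) streams so an occasionally large stack is not kept alive forever. A hedged sketch of enabling it:

    package main

    import (
        "runtime"

        "google.golang.org/grpc"
    )

    func main() {
        // Experimental: handle incoming streams on a fixed set of worker goroutines
        // instead of spawning a new goroutine per stream.
        s := grpc.NewServer(grpc.NumStreamWorkers(uint32(runtime.NumCPU())))
        defer s.Stop()
    }
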
@@ -625,7 +670,7 @@ func NewServer(opt ...ServerOption) *Server {
 
 // printf records an event in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
+func (s *Server) printf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Printf(format, a...)
 	}
@@ -633,7 +678,7 @@ func (s *Server) printf(format string, a ...interface{}) {
 
 // errorf records an error in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
+func (s *Server) errorf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Errorf(format, a...)
 	}
@@ -648,14 +693,14 @@ type ServiceRegistrar interface {
 	// once the server has started serving.
 	// desc describes the service and its methods and handlers. impl is the
 	// service implementation which is passed to the method handlers.
-	RegisterService(desc *ServiceDesc, impl interface{})
+	RegisterService(desc *ServiceDesc, impl any)
 }
 
 // RegisterService registers a service and its implementation to the gRPC
 // server. It is called from the IDL generated code. This must be called before
 // invoking Serve. If ss is non-nil (for legacy code), its type is checked to
 // ensure it implements sd.HandlerType.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
 	if ss != nil {
 		ht := reflect.TypeOf(sd.HandlerType).Elem()
 		st := reflect.TypeOf(ss)
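
RegisterService is normally reached through generated code rather than called directly. A hedged sketch using the canonical helloworld service (the pb package and Greeter names come from google.golang.org/grpc/examples, not from this diff):

    package main

    import (
        "context"
        "log"
        "net"

        "google.golang.org/grpc"
        pb "google.golang.org/grpc/examples/helloworld/helloworld"
    )

    type greeter struct {
        pb.UnimplementedGreeterServer
    }

    func (g *greeter) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
        return &pb.HelloReply{Message: "hello " + in.GetName()}, nil
    }

    func main() {
        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatal(err)
        }
        s := grpc.NewServer()
        // Generated helper; underneath it calls s.RegisterService with the generated
        // ServiceDesc, which is where the HandlerType check above runs.
        pb.RegisterGreeterServer(s, &greeter{})
        log.Fatal(s.Serve(lis))
    }
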
@@ -666,7 +711,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
 	s.register(sd, ss)
 }
 
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+func (s *Server) register(sd *ServiceDesc, ss any) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.printf("RegisterService(%q)", sd.ServiceName)
@@ -707,7 +752,7 @@ type MethodInfo struct {
 type ServiceInfo struct {
 	Methods []MethodInfo
 	// Metadata is the metadata specified in ServiceDesc when registering service.
-	Metadata interface{}
+	Metadata any
 }
 
 // GetServiceInfo returns a map from service names to ServiceInfo.
@@ -908,6 +953,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		InitialConnWindowSize: s.opts.initialConnWindowSize,
 		WriteBufferSize: s.opts.writeBufferSize,
 		ReadBufferSize: s.opts.readBufferSize,
+		SharedWriteBuffer: s.opts.sharedWriteBuffer,
 		ChannelzParentID: s.channelzID,
 		MaxHeaderListSize: s.opts.maxHeaderListSize,
 		HeaderTableSize: s.opts.headerTableSize,
@@ -936,26 +982,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 	defer st.Close(errors.New("finished serving streams for the server transport"))
 	var wg sync.WaitGroup
 
-	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
 	st.HandleStreams(func(stream *transport.Stream) {
 		wg.Add(1)
-
-		streamQuota.acquire()
-		f := func() {
-			defer streamQuota.release()
-			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
-		}
-
 		if s.opts.numServerWorkers > 0 {
+			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
 			select {
-			case s.serverWorkerChannel <- f:
+			case s.serverWorkerChannel <- data:
 				return
 			default:
 				// If all stream workers are busy, fallback to the default code path.
 			}
 		}
-		go f()
+		go func() {
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}()
 	}, func(ctx context.Context, method string) context.Context {
 		if !EnableTracing {
 			return ctx
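
Both versions of serveStreams shown above share the same dispatch shape: offer the new stream to the worker channel, and if no worker is free, fall back to a dedicated goroutine. A standalone sketch of that shape (generic Go, not the vendored code):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        work := make(chan func()) // unbuffered: a send only succeeds if a worker is idle
        for i := 0; i < 4; i++ {
            go func() {
                for f := range work {
                    f()
                }
            }()
        }

        var wg sync.WaitGroup
        for i := 0; i < 16; i++ {
            i := i
            wg.Add(1)
            task := func() { defer wg.Done(); fmt.Println("handled", i) }
            select {
            case work <- task: // an idle worker picked it up
            default:
                go task() // all workers busy: fall back to a fresh goroutine
            }
        }
        close(work)
        wg.Wait()
    }
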
@@ -1094,7 +1135,7 @@ func (s *Server) incrCallsFailed() {
 	atomic.AddInt64(&s.czData.callsFailed, 1)
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
 		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
@@ -1141,7 +1182,7 @@ func chainUnaryServerInterceptors(s *Server) {
 }
 
 func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
-	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
 		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
 	}
 }
@@ -1150,7 +1191,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
 	if curr == len(interceptors)-1 {
 		return finalHandler
 	}
-	return func(ctx context.Context, req interface{}) (interface{}, error) {
+	return func(ctx context.Context, req any) (any, error) {
 		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
 	}
 }
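
These chaining helpers are what grpc.ChainUnaryInterceptor builds on: each interceptor receives a handler that is really the next interceptor in the slice, with the actual method handler at the end. A hedged sketch of two interceptors written against the any-based signature:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func logging(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
        log.Printf("-> %s", info.FullMethod)
        resp, err := handler(ctx, req) // runs the next interceptor, or the method handler
        log.Printf("<- %s err=%v", info.FullMethod, err)
        return resp, err
    }

    func recovery(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
        defer func() {
            if r := recover(); r != nil {
                err = status.Errorf(codes.Internal, "panic in %s: %v", info.FullMethod, r)
            }
        }()
        return handler(ctx, req)
    }

    func main() {
        // logging is the outermost interceptor, then recovery, then the handler itself.
        s := grpc.NewServer(grpc.ChainUnaryInterceptor(logging, recovery))
        defer s.Stop()
    }
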
@@ -1187,7 +1228,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	defer func() {
 		if trInfo != nil {
 			if err != nil && err != io.EOF {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				trInfo.tr.SetError()
 			}
 			trInfo.tr.Finish()
@@ -1294,7 +1335,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 	}
-	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
@@ -1304,7 +1345,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if channelz.IsOn() {
 		t.IncrMsgRecv()
 	}
-	df := func(v interface{}) error {
+	df := func(v any) error {
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
@@ -1468,7 +1509,7 @@ func chainStreamServerInterceptors(s *Server) {
 }
 
 func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
-	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
 		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
 	}
 }
@@ -1477,7 +1518,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
 	if curr == len(interceptors)-1 {
 		return finalHandler
 	}
-	return func(srv interface{}, stream ServerStream) error {
+	return func(srv any, stream ServerStream) error {
 		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
 	}
 }
@@ -1504,7 +1545,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		ctx: ctx,
 		t: t,
 		s: stream,
-		p: &parser{r: stream},
+		p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
 		codec: s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1518,7 +1559,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 	if trInfo != nil {
 		ss.mu.Lock()
 		if err != nil && err != io.EOF {
-			ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			ss.trInfo.tr.SetError()
 		}
 		ss.trInfo.tr.Finish()
@@ -1621,7 +1662,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 	}
 	var appErr error
-	var server interface{}
+	var server any
 	if info != nil {
 		server = info.serviceImpl
 	}
@@ -1687,13 +1728,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	pos := strings.LastIndex(sm, "/")
 	if pos == -1 {
 		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
 			trInfo.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
 		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				trInfo.tr.SetError()
 			}
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
@@ -1734,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	}
 	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			trInfo.tr.SetError()
 		}
 		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
@@ -2050,32 +2091,3 @@ func validateSendCompressor(name, clientCompressors string) error {
 	}
 	return fmt.Errorf("client does not support compressor %q", name)
 }
-
-// atomicSemaphore implements a blocking, counting semaphore. acquire should be
-// called synchronously; release may be called asynchronously.
-type atomicSemaphore struct {
-	n int64
-	wait chan struct{}
-}
-
-func (q *atomicSemaphore) acquire() {
-	if atomic.AddInt64(&q.n, -1) < 0 {
-		// We ran out of quota. Block until a release happens.
-		<-q.wait
-	}
-}
-
-func (q *atomicSemaphore) release() {
-	// N.B. the "<= 0" check below should allow for this to work with multiple
-	// concurrent calls to acquire, but also note that with synchronous calls to
-	// acquire, as our system does, n will never be less than -1. There are
-	// fairness issues (queuing) to consider if this was to be generalized.
-	if atomic.AddInt64(&q.n, 1) <= 0 {
-		// An acquire was waiting on us. Unblock it.
-		q.wait <- struct{}{}
-	}
-}
-
-func newHandlerQuota(n uint32) *atomicSemaphore {
-	return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
-}
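
For context on what this commit drops: the semaphore pairs each acquire (which may block on the wait channel) with a release (which unblocks it), which is how the previously vendored serveStreams capped the number of in-flight handlers. A self-contained sketch of the same shape, outside gRPC:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // Same shape as the removed atomicSemaphore: acquire is called from one
    // goroutine, release may be called from any goroutine.
    type semaphore struct {
        n    int64
        wait chan struct{}
    }

    func newSemaphore(n int64) *semaphore {
        return &semaphore{n: n, wait: make(chan struct{}, 1)}
    }

    func (q *semaphore) acquire() {
        if atomic.AddInt64(&q.n, -1) < 0 {
            <-q.wait // out of quota: block until a release
        }
    }

    func (q *semaphore) release() {
        if atomic.AddInt64(&q.n, 1) <= 0 {
            q.wait <- struct{}{} // an acquire is waiting: unblock it
        }
    }

    func main() {
        quota := newSemaphore(2) // at most 2 tasks in flight
        var wg sync.WaitGroup
        for i := 0; i < 5; i++ {
            quota.acquire() // blocks while 2 tasks are still running
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                defer quota.release()
                fmt.Println("task", i)
            }(i)
        }
        wg.Wait()
    }
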