vendor protobuf & grpc
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md (generated, vendored): 20 changed lines

@@ -3,6 +3,8 @@
[](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
[](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
[](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
[](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
[](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
[](LICENSE)

[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.

@@ -36,7 +38,7 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
    // After all your registrations, make sure all of the Prometheus metrics are initialized.
    grpc_prometheus.Register(myServer)
    // Register Prometheus metrics handler.
    http.Handle("/metrics", prometheus.Handler())
    http.Handle("/metrics", promhttp.Handler())
...
```
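For orientation while reviewing this vendored update, here is a minimal, self-contained sketch of the server-side wiring the snippet above abbreviates. The service registration comment, ports, and listen addresses are assumptions for illustration only; the grpc_prometheus calls are the ones shown in this diff.

```go
package main

import (
	"net"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	myServer := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)
	// pb.RegisterMyServiceServer(myServer, &myService{}) // hypothetical service registration

	// After all registrations, pre-initialize all Prometheus counters to 0.
	grpc_prometheus.Register(myServer)

	// Serve the default registry; promhttp.Handler() replaces the deprecated prometheus.Handler().
	http.Handle("/metrics", promhttp.Handler())
	go http.ListenAndServe(":9092", nil) // assumed metrics port

	lis, _ := net.Listen("tcp", ":50051") // assumed gRPC port; handle the error in real code
	myServer.Serve(lis)
}
```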

@@ -47,8 +49,8 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
...
   clientConn, err = grpc.Dial(
       address,
		   grpc.WithUnaryInterceptor(UnaryClientInterceptor),
		   grpc.WithStreamInterceptor(StreamClientInterceptor)
		   grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		   grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor)
   )
   client = pb_testproto.NewTestServiceClient(clientConn)
   resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
@@ -116,7 +118,7 @@ each of the 20 messages sent back, a counter will be incremented:
grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
```

After the call completes, it's status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
and the relevant call labels increment the `grpc_server_handled_total` counter.

```jsoniq
@@ -126,8 +128,8 @@ grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mw
## Histograms

[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
to measure latency distributions of your RPCs. However since it is bad practice to have metrics
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels))
to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels)
the latency monitoring metrics are disabled by default. To enable them please call the following
in your server initialization code:

@@ -135,8 +137,8 @@ in your server initialization code:
grpc_prometheus.EnableHandlingTimeHistogram()
```
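If the default buckets are not a good fit, the same call accepts options (see metric_options.go in this diff). A small sketch; the bucket values are assumptions, not a recommendation:

```go
grpc_prometheus.EnableHandlingTimeHistogram(
	// Example buckets only; choose ranges that match your latency profile.
	grpc_prometheus.WithHistogramBuckets([]float64{0.005, 0.025, 0.1, 0.5, 1, 2.5, 10}),
)
```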

After the call completes, it's handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
variable `grpc_server_handling_seconds`. It contains three sub-metrics:
After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:

 * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
 * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
@@ -166,7 +168,7 @@ grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_se

## Useful query examples

Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
Prometheus philosophy is to provide raw metrics to the monitoring system, and
let the aggregations be handled there. The verbosity of above metrics make it possible to have that
flexibility. Here's a couple of useful monitoring queries:

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go (generated, vendored): 85 changed lines

@@ -6,67 +6,34 @@
package grpc_prometheus

import (
	"io"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	prom "github.com/prometheus/client_golang/prometheus"
)

// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	monitor := newClientReporter(Unary, method)
	monitor.SentMessage()
	err := invoker(ctx, method, req, reply, cc, opts...)
	if err != nil {
		monitor.ReceivedMessage()
	}
	monitor.Handled(grpc.Code(err))
	return err
var (
	// DefaultClientMetrics is the default instance of ClientMetrics. It is
	// intended to be used in conjunction the default Prometheus metrics
	// registry.
	DefaultClientMetrics = NewClientMetrics()

	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()

	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
)

func init() {
	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
}

// StreamServerInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	monitor := newClientReporter(clientStreamType(desc), method)
	clientStream, err := streamer(ctx, desc, cc, method, opts...)
	if err != nil {
		monitor.Handled(grpc.Code(err))
		return nil, err
	}
	return &monitoredClientStream{clientStream, monitor}, nil
}

func clientStreamType(desc *grpc.StreamDesc) grpcType {
	if desc.ClientStreams && !desc.ServerStreams {
		return ClientStream
	} else if !desc.ClientStreams && desc.ServerStreams {
		return ServerStream
	}
	return BidiStream
}

// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
type monitoredClientStream struct {
	grpc.ClientStream
	monitor *clientReporter
}

func (s *monitoredClientStream) SendMsg(m interface{}) error {
	err := s.ClientStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredClientStream) RecvMsg(m interface{}) error {
	err := s.ClientStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	} else if err == io.EOF {
		s.monitor.Handled(codes.OK)
	} else {
		s.monitor.Handled(grpc.Code(err))
	}
	return err
// EnableClientHandlingTimeHistogram turns on recording of handling time of
// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
// query. This function acts on the DefaultClientMetrics variable and the
// default Prometheus metrics registry.
func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
	prom.Register(DefaultClientMetrics.clientHandledHistogram)
}
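After this change, client.go only wires the package-level defaults to DefaultClientMetrics and the default Prometheus registry. A minimal sketch of how a caller typically uses those defaults; the address argument, grpc.WithInsecure, and error handling are assumptions for the sketch:

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func dial(address string) (*grpc.ClientConn, error) {
	// Optional: also record client-side handling time on the default registry.
	grpc_prometheus.EnableClientHandlingTimeHistogram()

	return grpc.Dial(
		address,
		grpc.WithInsecure(), // assumption: plaintext connection for the sketch
		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
	)
}
```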

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go (generated, vendored, new file): 170 lines

@@ -0,0 +1,170 @@
package grpc_prometheus

import (
	"io"

	prom "github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ClientMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC client.
type ClientMetrics struct {
	clientStartedCounter          *prom.CounterVec
	clientHandledCounter          *prom.CounterVec
	clientStreamMsgReceived       *prom.CounterVec
	clientStreamMsgSent           *prom.CounterVec
	clientHandledHistogramEnabled bool
	clientHandledHistogramOpts    prom.HistogramOpts
	clientHandledHistogram        *prom.HistogramVec
}

// NewClientMetrics returns a ClientMetrics object. Use a new instance of
// ClientMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
	opts := counterOptions(counterOpts)
	return &ClientMetrics{
		clientStartedCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_started_total",
				Help: "Total number of RPCs started on the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientHandledCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_handled_total",
				Help: "Total number of RPCs completed by the client, regardless of success or failure.",
			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),

		clientStreamMsgReceived: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_msg_received_total",
				Help: "Total number of RPC stream messages received by the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientStreamMsgSent: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_msg_sent_total",
				Help: "Total number of gRPC stream messages sent by the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientHandledHistogramEnabled: false,
		clientHandledHistogramOpts: prom.HistogramOpts{
			Name:    "grpc_client_handling_seconds",
			Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
			Buckets: prom.DefBuckets,
		},
		clientHandledHistogram: nil,
	}
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
	m.clientStartedCounter.Describe(ch)
	m.clientHandledCounter.Describe(ch)
	m.clientStreamMsgReceived.Describe(ch)
	m.clientStreamMsgSent.Describe(ch)
	if m.clientHandledHistogramEnabled {
		m.clientHandledHistogram.Describe(ch)
	}
}

// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
	m.clientStartedCounter.Collect(ch)
	m.clientHandledCounter.Collect(ch)
	m.clientStreamMsgReceived.Collect(ch)
	m.clientStreamMsgSent.Collect(ch)
	if m.clientHandledHistogramEnabled {
		m.clientHandledHistogram.Collect(ch)
	}
}

// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&m.clientHandledHistogramOpts)
	}
	if !m.clientHandledHistogramEnabled {
		m.clientHandledHistogram = prom.NewHistogramVec(
			m.clientHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
	}
	m.clientHandledHistogramEnabled = true
}

// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		monitor := newClientReporter(m, Unary, method)
		monitor.SentMessage()
		err := invoker(ctx, method, req, reply, cc, opts...)
		if err != nil {
			monitor.ReceivedMessage()
		}
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		return err
	}
}

// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		monitor := newClientReporter(m, clientStreamType(desc), method)
		clientStream, err := streamer(ctx, desc, cc, method, opts...)
		if err != nil {
			st, _ := status.FromError(err)
			monitor.Handled(st.Code())
			return nil, err
		}
		return &monitoredClientStream{clientStream, monitor}, nil
	}
}

func clientStreamType(desc *grpc.StreamDesc) grpcType {
	if desc.ClientStreams && !desc.ServerStreams {
		return ClientStream
	} else if !desc.ClientStreams && desc.ServerStreams {
		return ServerStream
	}
	return BidiStream
}

// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
type monitoredClientStream struct {
	grpc.ClientStream
	monitor *clientReporter
}

func (s *monitoredClientStream) SendMsg(m interface{}) error {
	err := s.ClientStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredClientStream) RecvMsg(m interface{}) error {
	err := s.ClientStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	} else if err == io.EOF {
		s.monitor.Handled(codes.OK)
	} else {
		st, _ := status.FromError(err)
		s.monitor.Handled(st.Code())
	}
	return err
}
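The new ClientMetrics type is what makes non-default registries possible: it implements prometheus.Collector through the Describe/Collect methods above, so it can be registered anywhere. A sketch under the assumption that a dedicated registry is wanted instead of the global default (address handling and grpc.WithInsecure are placeholders):

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func newInstrumentedConn(address string) (*grpc.ClientConn, *prometheus.Registry, error) {
	// A dedicated registry instead of the global default one.
	reg := prometheus.NewRegistry()

	clientMetrics := grpc_prometheus.NewClientMetrics()
	clientMetrics.EnableClientHandlingTimeHistogram()
	reg.MustRegister(clientMetrics) // ClientMetrics is a prometheus.Collector

	conn, err := grpc.Dial(
		address,
		grpc.WithInsecure(), // assumption for the sketch
		grpc.WithUnaryInterceptor(clientMetrics.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(clientMetrics.StreamClientInterceptor()),
	)
	return conn, reg, err
}
```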

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go (generated, vendored): 91 changed lines

@@ -7,105 +7,40 @@ import (
	"time"

	"google.golang.org/grpc/codes"

	prom "github.com/prometheus/client_golang/prometheus"
)

var (
	clientStartedCounter = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "client",
			Name:      "started_total",
			Help:      "Total number of RPCs started on the client.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	clientHandledCounter = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "client",
			Name:      "handled_total",
			Help:      "Total number of RPCs completed by the client, regardless of success or failure.",
		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})

	clientStreamMsgReceived = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "client",
			Name:      "msg_received_total",
			Help:      "Total number of RPC stream messages received by the client.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	clientStreamMsgSent = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "client",
			Name:      "msg_sent_total",
			Help:      "Total number of gRPC stream messages sent by the client.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	clientHandledHistogramEnabled = false
	clientHandledHistogramOpts    = prom.HistogramOpts{
		Namespace: "grpc",
		Subsystem: "client",
		Name:      "handling_seconds",
		Help:      "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
		Buckets:   prom.DefBuckets,
	}
	clientHandledHistogram *prom.HistogramVec
)

func init() {
	prom.MustRegister(clientStartedCounter)
	prom.MustRegister(clientHandledCounter)
	prom.MustRegister(clientStreamMsgReceived)
	prom.MustRegister(clientStreamMsgSent)
}

// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&clientHandledHistogramOpts)
	}
	if !clientHandledHistogramEnabled {
		clientHandledHistogram = prom.NewHistogramVec(
			clientHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
		prom.Register(clientHandledHistogram)
	}
	clientHandledHistogramEnabled = true
}

type clientReporter struct {
	metrics     *ClientMetrics
	rpcType     grpcType
	serviceName string
	methodName  string
	startTime   time.Time
}

func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
	r := &clientReporter{rpcType: rpcType}
	if clientHandledHistogramEnabled {
func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
	r := &clientReporter{
		metrics: m,
		rpcType: rpcType,
	}
	if r.metrics.clientHandledHistogramEnabled {
		r.startTime = time.Now()
	}
	r.serviceName, r.methodName = splitMethodName(fullMethod)
	clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	return r
}

func (r *clientReporter) ReceivedMessage() {
	clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}

func (r *clientReporter) SentMessage() {
	clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}

func (r *clientReporter) Handled(code codes.Code) {
	clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
	if clientHandledHistogramEnabled {
		clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
	r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
	if r.metrics.clientHandledHistogramEnabled {
		r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
	}
}

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go (generated, vendored, new file): 41 lines

@@ -0,0 +1,41 @@
package grpc_prometheus

import (
	prom "github.com/prometheus/client_golang/prometheus"
)

// A CounterOption lets you add options to Counter metrics using With* funcs.
type CounterOption func(*prom.CounterOpts)

type counterOptions []CounterOption

func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
	for _, f := range co {
		f(&o)
	}
	return o
}

// WithConstLabels allows you to add ConstLabels to Counter metrics.
func WithConstLabels(labels prom.Labels) CounterOption {
	return func(o *prom.CounterOpts) {
		o.ConstLabels = labels
	}
}

// A HistogramOption lets you add options to Histogram metrics using With*
// funcs.
type HistogramOption func(*prom.HistogramOpts)

// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}

// WithHistogramConstLabels allows you to add custom ConstLabels to
// histograms metrics.
func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
	return func(o *prom.HistogramOpts) {
		o.ConstLabels = labels
	}
}
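metric_options.go introduces CounterOption and HistogramOption so counters and histograms can be customized before registration. A sketch of combining them; the "service_variant" const label and the bucket values are hypothetical:

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

func newLabelledServerMetrics() *grpc_prometheus.ServerMetrics {
	// "service_variant" is a hypothetical const label attached to every series.
	m := grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithConstLabels(prometheus.Labels{"service_variant": "canary"}),
	)
	m.EnableHandlingTimeHistogram(
		grpc_prometheus.WithHistogramConstLabels(prometheus.Labels{"service_variant": "canary"}),
		grpc_prometheus.WithHistogramBuckets([]float64{0.01, 0.1, 1, 10}), // example buckets
	)
	return m
}
```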

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go (generated, vendored): 92 changed lines

@@ -6,69 +6,43 @@
package grpc_prometheus

import (
	"golang.org/x/net/context"
	prom "github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

// PreregisterServices takes a gRPC server and pre-initializes all counters to 0.
// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have
// been registered with the server.
var (
	// DefaultServerMetrics is the default instance of ServerMetrics. It is
	// intended to be used in conjunction the default Prometheus metrics
	// registry.
	DefaultServerMetrics = NewServerMetrics()

	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()

	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
)

func init() {
	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
}

// Register takes a gRPC server and pre-initializes all counters to 0. This
// allows for easier monitoring in Prometheus (no missing metrics), and should
// be called *after* all services have been registered with the server. This
// function acts on the DefaultServerMetrics variable.
func Register(server *grpc.Server) {
	serviceInfo := server.GetServiceInfo()
	for serviceName, info := range serviceInfo {
		for _, mInfo := range info.Methods {
			preRegisterMethod(serviceName, &mInfo)
		}
	}
	DefaultServerMetrics.InitializeMetrics(server)
}

// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	monitor := newServerReporter(Unary, info.FullMethod)
	monitor.ReceivedMessage()
	resp, err := handler(ctx, req)
	monitor.Handled(grpc.Code(err))
	if err == nil {
		monitor.SentMessage()
	}
	return resp, err
}

// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	monitor := newServerReporter(streamRpcType(info), info.FullMethod)
	err := handler(srv, &monitoredServerStream{ss, monitor})
	monitor.Handled(grpc.Code(err))
	return err
}

func streamRpcType(info *grpc.StreamServerInfo) grpcType {
	if info.IsClientStream && !info.IsServerStream {
		return ClientStream
	} else if !info.IsClientStream && info.IsServerStream {
		return ServerStream
	}
	return BidiStream
}

// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
type monitoredServerStream struct {
	grpc.ServerStream
	monitor *serverReporter
}

func (s *monitoredServerStream) SendMsg(m interface{}) error {
	err := s.ServerStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredServerStream) RecvMsg(m interface{}) error {
	err := s.ServerStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	}
	return err
// EnableHandlingTimeHistogram turns on recording of handling time
// of RPCs. Histogram metrics can be very expensive for Prometheus
// to retain and query. This function acts on the DefaultServerMetrics
// variable and the default Prometheus metrics registry.
func EnableHandlingTimeHistogram(opts ...HistogramOption) {
	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
	prom.Register(DefaultServerMetrics.serverHandledHistogram)
}

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go (generated, vendored, new file): 185 lines

@@ -0,0 +1,185 @@
package grpc_prometheus

import (
	prom "github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)

// ServerMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC server.
type ServerMetrics struct {
	serverStartedCounter          *prom.CounterVec
	serverHandledCounter          *prom.CounterVec
	serverStreamMsgReceived       *prom.CounterVec
	serverStreamMsgSent           *prom.CounterVec
	serverHandledHistogramEnabled bool
	serverHandledHistogramOpts    prom.HistogramOpts
	serverHandledHistogram        *prom.HistogramVec
}

// NewServerMetrics returns a ServerMetrics object. Use a new instance of
// ServerMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
	opts := counterOptions(counterOpts)
	return &ServerMetrics{
		serverStartedCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_started_total",
				Help: "Total number of RPCs started on the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverHandledCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_handled_total",
				Help: "Total number of RPCs completed on the server, regardless of success or failure.",
			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
		serverStreamMsgReceived: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_msg_received_total",
				Help: "Total number of RPC stream messages received on the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverStreamMsgSent: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_msg_sent_total",
				Help: "Total number of gRPC stream messages sent by the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverHandledHistogramEnabled: false,
		serverHandledHistogramOpts: prom.HistogramOpts{
			Name:    "grpc_server_handling_seconds",
			Help:    "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
			Buckets: prom.DefBuckets,
		},
		serverHandledHistogram: nil,
	}
}

// EnableHandlingTimeHistogram enables histograms being registered when
// registering the ServerMetrics on a Prometheus registry. Histograms can be
// expensive on Prometheus servers. It takes options to configure histogram
// options such as the defined buckets.
func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&m.serverHandledHistogramOpts)
	}
	if !m.serverHandledHistogramEnabled {
		m.serverHandledHistogram = prom.NewHistogramVec(
			m.serverHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
	}
	m.serverHandledHistogramEnabled = true
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
	m.serverStartedCounter.Describe(ch)
	m.serverHandledCounter.Describe(ch)
	m.serverStreamMsgReceived.Describe(ch)
	m.serverStreamMsgSent.Describe(ch)
	if m.serverHandledHistogramEnabled {
		m.serverHandledHistogram.Describe(ch)
	}
}

// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
	m.serverStartedCounter.Collect(ch)
	m.serverHandledCounter.Collect(ch)
	m.serverStreamMsgReceived.Collect(ch)
	m.serverStreamMsgSent.Collect(ch)
	if m.serverHandledHistogramEnabled {
		m.serverHandledHistogram.Collect(ch)
	}
}

// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		monitor := newServerReporter(m, Unary, info.FullMethod)
		monitor.ReceivedMessage()
		resp, err := handler(ctx, req)
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		if err == nil {
			monitor.SentMessage()
		}
		return resp, err
	}
}

// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
		err := handler(srv, &monitoredServerStream{ss, monitor})
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		return err
	}
}

// InitializeMetrics initializes all metrics, with their appropriate null
// value, for all gRPC methods registered on a gRPC server. This is useful, to
// ensure that all metrics exist when collecting and querying.
func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
	serviceInfo := server.GetServiceInfo()
	for serviceName, info := range serviceInfo {
		for _, mInfo := range info.Methods {
			preRegisterMethod(m, serviceName, &mInfo)
		}
	}
}

func streamRPCType(info *grpc.StreamServerInfo) grpcType {
	if info.IsClientStream && !info.IsServerStream {
		return ClientStream
	} else if !info.IsClientStream && info.IsServerStream {
		return ServerStream
	}
	return BidiStream
}

// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
type monitoredServerStream struct {
	grpc.ServerStream
	monitor *serverReporter
}

func (s *monitoredServerStream) SendMsg(m interface{}) error {
	err := s.ServerStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredServerStream) RecvMsg(m interface{}) error {
	err := s.ServerStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	}
	return err
}

// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
	methodName := mInfo.Name
	methodType := string(typeFromMethodInfo(mInfo))
	// These are just references (no increments), as just referencing will create the labels but not set values.
	metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
	metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
	metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
	if metrics.serverHandledHistogramEnabled {
		metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
	}
	for _, code := range allCodes {
		metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
	}
}
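Putting ServerMetrics together with a non-default registry might look like the following sketch. The service registration comment and handler wiring are assumptions; the calls shown (NewServerMetrics, MustRegister, InitializeMetrics, promhttp.HandlerFor) come from this vendored code and the Prometheus client library:

```go
package main

import (
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func newInstrumentedServer() (*grpc.Server, http.Handler) {
	reg := prometheus.NewRegistry()
	serverMetrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(serverMetrics) // ServerMetrics is a prometheus.Collector

	server := grpc.NewServer(
		grpc.UnaryInterceptor(serverMetrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(serverMetrics.StreamServerInterceptor()),
	)
	// Register services here, e.g. pb.RegisterTestServiceServer(server, impl),
	// then pre-populate the per-method label values:
	serverMetrics.InitializeMetrics(server)

	// Expose only the custom registry, not the global default one.
	return server, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
}
```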

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go (generated, vendored): 137 changed lines

@@ -7,151 +7,40 @@ import (
	"time"

	"google.golang.org/grpc/codes"

	prom "github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

type grpcType string

const (
	Unary        grpcType = "unary"
	ClientStream grpcType = "client_stream"
	ServerStream grpcType = "server_stream"
	BidiStream   grpcType = "bidi_stream"
)

var (
	serverStartedCounter = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "server",
			Name:      "started_total",
			Help:      "Total number of RPCs started on the server.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	serverHandledCounter = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "server",
			Name:      "handled_total",
			Help:      "Total number of RPCs completed on the server, regardless of success or failure.",
		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})

	serverStreamMsgReceived = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "server",
			Name:      "msg_received_total",
			Help:      "Total number of RPC stream messages received on the server.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	serverStreamMsgSent = prom.NewCounterVec(
		prom.CounterOpts{
			Namespace: "grpc",
			Subsystem: "server",
			Name:      "msg_sent_total",
			Help:      "Total number of gRPC stream messages sent by the server.",
		}, []string{"grpc_type", "grpc_service", "grpc_method"})

	serverHandledHistogramEnabled = false
	serverHandledHistogramOpts    = prom.HistogramOpts{
		Namespace: "grpc",
		Subsystem: "server",
		Name:      "handling_seconds",
		Help:      "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
		Buckets:   prom.DefBuckets,
	}
	serverHandledHistogram *prom.HistogramVec
)

func init() {
	prom.MustRegister(serverStartedCounter)
	prom.MustRegister(serverHandledCounter)
	prom.MustRegister(serverStreamMsgReceived)
	prom.MustRegister(serverStreamMsgSent)
}

type HistogramOption func(*prom.HistogramOpts)

// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}

// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func EnableHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&serverHandledHistogramOpts)
	}
	if !serverHandledHistogramEnabled {
		serverHandledHistogram = prom.NewHistogramVec(
			serverHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
		prom.Register(serverHandledHistogram)
	}
	serverHandledHistogramEnabled = true
}

type serverReporter struct {
	metrics     *ServerMetrics
	rpcType     grpcType
	serviceName string
	methodName  string
	startTime   time.Time
}

func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter {
	r := &serverReporter{rpcType: rpcType}
	if serverHandledHistogramEnabled {
func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
	r := &serverReporter{
		metrics: m,
		rpcType: rpcType,
	}
	if r.metrics.serverHandledHistogramEnabled {
		r.startTime = time.Now()
	}
	r.serviceName, r.methodName = splitMethodName(fullMethod)
	serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	return r
}

func (r *serverReporter) ReceivedMessage() {
	serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}

func (r *serverReporter) SentMessage() {
	serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
	r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}

func (r *serverReporter) Handled(code codes.Code) {
	serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
	if serverHandledHistogramEnabled {
		serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
	r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
	if r.metrics.serverHandledHistogramEnabled {
		r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
	}
}

// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
	methodName := mInfo.Name
	methodType := string(typeFromMethodInfo(mInfo))
	// These are just references (no increments), as just referencing will create the labels but not set values.
	serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
	serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
	serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
	if serverHandledHistogramEnabled {
		serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
	}
	for _, code := range allCodes {
		serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
	}
}

func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
	if mInfo.IsClientStream == false && mInfo.IsServerStream == false {
		return Unary
	}
	if mInfo.IsClientStream == true && mInfo.IsServerStream == false {
		return ClientStream
	}
	if mInfo.IsClientStream == false && mInfo.IsServerStream == true {
		return ServerStream
	}
	return BidiStream
}

vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go (generated, vendored): 23 changed lines

@@ -6,9 +6,19 @@ package grpc_prometheus
import (
	"strings"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

type grpcType string

const (
	Unary        grpcType = "unary"
	ClientStream grpcType = "client_stream"
	ServerStream grpcType = "server_stream"
	BidiStream   grpcType = "bidi_stream"
)

var (
	allCodes = []codes.Code{
		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
@@ -25,3 +35,16 @@ func splitMethodName(fullMethodName string) (string, string) {
	}
	return "unknown", "unknown"
}

func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
	if !mInfo.IsClientStream && !mInfo.IsServerStream {
		return Unary
	}
	if mInfo.IsClientStream && !mInfo.IsServerStream {
		return ClientStream
	}
	if !mInfo.IsClientStream && mInfo.IsServerStream {
		return ServerStream
	}
	return BidiStream
}
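The helpers moved into util.go define the label convention used throughout the package: a full gRPC method name is split into grpc_service and grpc_method. Since splitMethodName is unexported, the standalone snippet below only mirrors that convention for illustration; it is not the package's exported API.

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative re-implementation of the naming convention, not the package's API.
func splitFullMethod(fullMethodName string) (string, string) {
	fullMethodName = strings.TrimPrefix(fullMethodName, "/")
	if i := strings.Index(fullMethodName, "/"); i >= 0 {
		return fullMethodName[:i], fullMethodName[i+1:]
	}
	return "unknown", "unknown"
}

func main() {
	service, method := splitFullMethod("/mwitkow.testproto.TestService/PingList")
	fmt.Println(service, method) // mwitkow.testproto.TestService PingList
}
```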