Update vendor package github.com/coreos/...
vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD (generated, vendored) | 1

@@ -23,6 +23,7 @@ go_library(
         "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
         "//vendor/github.com/coreos/etcd/version:go_default_library",
         "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
+        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library",
     ],
 )
vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go (generated, vendored) | 32

@@ -24,6 +24,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/raft"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
@@ -43,11 +44,6 @@ func HandlePrometheus(mux *http.ServeMux) {
 	mux.Handle(pathMetrics, promhttp.Handler())
 }
 
-// HandleHealth registers health handler on '/health'.
-func HandleHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
-	mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
-}
-
 // NewHealthHandler handles '/health' requests.
 func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
@@ -67,6 +63,26 @@ func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
 	}
 }
 
+var (
+	healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "health_success",
+		Help:      "The total number of successful health checks",
+	})
+	healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "health_failures",
+		Help:      "The total number of failed health checks",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(healthSuccess)
+	prometheus.MustRegister(healthFailed)
+}
+
 // Health defines etcd server health status.
 // TODO: remove manual parsing in etcdctl cluster-health
 type Health struct {
@@ -97,5 +113,11 @@ func checkHealth(srv etcdserver.ServerV2) Health {
 			h.Health = "false"
 		}
 	}
+
+	if h.Health == "true" {
+		healthSuccess.Inc()
+	} else {
+		healthFailed.Inc()
+	}
 	return h
 }
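Note: the metrics.go change wires outcome counters directly into the health check: checkHealth increments exactly one of healthSuccess or healthFailed per call, and init() registers both once. A minimal standalone sketch of the same pattern follows; the package layout, port, and demo_* metric names are illustrative, not part of this commit.

package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Illustrative counters mirroring the healthSuccess/healthFailed pair above.
var (
	checkOK = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_health_success_total",
		Help: "The total number of successful health checks.",
	})
	checkFail = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_health_failures_total",
		Help: "The total number of failed health checks.",
	})
)

type health struct {
	Health string `json:"health"`
}

func healthHandler(check func() health) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		h := check()
		// Count each outcome exactly once per request, as checkHealth does.
		if h.Health == "true" {
			checkOK.Inc()
			w.WriteHeader(http.StatusOK)
		} else {
			checkFail.Inc()
			w.WriteHeader(http.StatusServiceUnavailable)
		}
		json.NewEncoder(w).Encode(h)
	}
}

func main() {
	prometheus.MustRegister(checkOK, checkFail)
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	mux.Handle("/health", healthHandler(func() health { return health{Health: "true"} }))
	log.Fatal(http.ListenAndServe(":8080", mux))
}

Curling /health and then /metrics on such a server shows demo_health_success_total advancing by one per check.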
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/BUILD (generated, vendored) | 4

@@ -37,15 +37,17 @@ go_library(
         "//vendor/github.com/coreos/etcd/version:go_default_library",
         "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
         "//vendor/github.com/gogo/protobuf/proto:go_default_library",
+        "//vendor/github.com/grpc-ecosystem/go-grpc-middleware:go_default_library",
         "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+        "//vendor/go.uber.org/zap:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
         "//vendor/google.golang.org/grpc/codes:go_default_library",
         "//vendor/google.golang.org/grpc/credentials:go_default_library",
-        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
         "//vendor/google.golang.org/grpc/health:go_default_library",
         "//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library",
         "//vendor/google.golang.org/grpc/metadata:go_default_library",
+        "//vendor/google.golang.org/grpc/peer:go_default_library",
         "//vendor/google.golang.org/grpc/status:go_default_library",
     ],
 )
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go (generated, vendored) | 30

@@ -16,18 +16,15 @@ package v3rpc
 
 import (
 	"crypto/tls"
-	"io/ioutil"
 	"math"
-	"os"
-	"sync"
 
 	"github.com/coreos/etcd/etcdserver"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
+	"github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/grpc-ecosystem/go-grpc-prometheus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/health"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
 )
@@ -38,17 +35,21 @@ const (
 	maxSendBytes = math.MaxInt32
 )
 
-// integration tests call this multiple times, which is racey in gRPC side
-var grpclogOnce sync.Once
-
 func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
 	var opts []grpc.ServerOption
 	opts = append(opts, grpc.CustomCodec(&codec{}))
 	if tls != nil {
 		opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
 	}
-	opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
-	opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
+	opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+		newLogUnaryInterceptor(s),
+		newUnaryInterceptor(s),
+		grpc_prometheus.UnaryServerInterceptor,
+	)))
+	opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+		newStreamInterceptor(s),
+		grpc_prometheus.StreamServerInterceptor,
+	)))
 	opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
 	opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
 	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
@@ -71,16 +72,5 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
 	// set zero values for metrics registered for this grpc server
 	grpc_prometheus.Register(grpcServer)
 
-	grpclogOnce.Do(func() {
-		if s.Cfg.Debug {
-			grpc.EnableTracing = true
-			// enable info, warning, error
-			grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
-		} else {
-			// only discard info
-			grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
-		}
-	})
-
 	return grpcServer
 }
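Note: grpc.UnaryInterceptor may be passed to a server only once, which is why the Server change above moves from registering newUnaryInterceptor and newStreamInterceptor directly to composing them with grpc_middleware.ChainUnaryServer and ChainStreamServer. A hedged sketch of that composition; the timing interceptor and the port are placeholders, not etcd code.

package main

import (
	"context"
	"log"
	"net"
	"time"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

// timing is a stand-in for an interceptor like newLogUnaryInterceptor:
// it wraps the handler call and observes the outcome.
func timing(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v", info.FullMethod, time.Since(start))
	return resp, err
}

func main() {
	// Multiple unary concerns must be composed into the single
	// UnaryInterceptor slot; the chain runs outermost first.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			timing,
			grpc_prometheus.UnaryServerInterceptor,
		)),
	)
	grpc_prometheus.Register(srv) // initialize per-method metrics at zero

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}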
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go (generated, vendored) | 127

@@ -25,9 +25,11 @@ import (
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/raft"
 
-	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
 )
 
 const (
@@ -40,7 +42,7 @@ type streamsMap struct {
 }
 
 func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
-	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
 		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
 			return nil, rpctypes.ErrGRPCNotCapable
 		}
@@ -54,7 +56,124 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
 			}
 		}
 
-		return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
+		return handler(ctx, req)
 	}
 }
 
+func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		startTime := time.Now()
+		resp, err := handler(ctx, req)
+		defer logUnaryRequestStats(ctx, nil, info, startTime, req, resp)
+		return resp, err
+	}
+}
+
+func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
+	duration := time.Since(startTime)
+	remote := "No remote client info."
+	peerInfo, ok := peer.FromContext(ctx)
+	if ok {
+		remote = peerInfo.Addr.String()
+	}
+	var responseType string = info.FullMethod
+	var reqCount, respCount int64
+	var reqSize, respSize int
+	var reqContent string
+	switch _resp := resp.(type) {
+	case *pb.RangeResponse:
+		_req, ok := req.(*pb.RangeRequest)
+		if ok {
+			reqCount = 0
+			reqSize = _req.Size()
+			reqContent = _req.String()
+		}
+		if _resp != nil {
+			respCount = _resp.GetCount()
+			respSize = _resp.Size()
+		}
+	case *pb.PutResponse:
+		_req, ok := req.(*pb.PutRequest)
+		if ok {
+			reqCount = 1
+			reqSize = _req.Size()
+			reqContent = pb.NewLoggablePutRequest(_req).String()
+			// redact value field from request content, see PR #9821
+		}
+		if _resp != nil {
+			respCount = 0
+			respSize = _resp.Size()
+		}
+	case *pb.DeleteRangeResponse:
+		_req, ok := req.(*pb.DeleteRangeRequest)
+		if ok {
+			reqCount = 0
+			reqSize = _req.Size()
+			reqContent = _req.String()
+		}
+		if _resp != nil {
+			respCount = _resp.GetDeleted()
+			respSize = _resp.Size()
+		}
+	case *pb.TxnResponse:
+		_req, ok := req.(*pb.TxnRequest)
+		if ok && _resp != nil {
+			if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
+				reqCount = int64(len(_req.GetSuccess()))
+				reqSize = 0
+				for _, r := range _req.GetSuccess() {
+					reqSize += r.Size()
+				}
+			} else {
+				reqCount = int64(len(_req.GetFailure()))
+				reqSize = 0
+				for _, r := range _req.GetFailure() {
+					reqSize += r.Size()
+				}
+			}
+			reqContent = pb.NewLoggableTxnRequest(_req).String()
+			// redact value field from request content, see PR #9821
+		}
+		if _resp != nil {
+			respCount = 0
+			respSize = _resp.Size()
+		}
+	default:
+		reqCount = -1
+		reqSize = -1
+		respCount = -1
+		respSize = -1
+	}
+
+	logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
+}
+
+func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
+	reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
+	if lg == nil {
+		plog.Debugf("start time = %v, "+
+			"time spent = %v, "+
+			"remote = %s, "+
+			"response type = %s, "+
+			"request count = %d, "+
+			"request size = %d, "+
+			"response count = %d, "+
+			"response size = %d, "+
+			"request content = %s",
+			startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent,
+		)
+	} else {
+		lg.Debug("request stats",
+			zap.Time("start time", startTime),
+			zap.Duration("time spent", duration),
+			zap.String("remote", remote),
+			zap.String("response type", responseType),
+			zap.Int64("request count", reqCount),
+			zap.Int("request size", reqSize),
+			zap.Int64("response count", respCount),
+			zap.Int("response size", respSize),
+			zap.String("request content", reqContent),
+		)
+	}
+}
+
@@ -90,7 +209,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
 			}
 		}
 
-		return prometheus.StreamServerInterceptor(srv, ss, info, handler)
+		return handler(srv, ss)
 	}
 }
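Note: logGenericRequestStats above takes an optional zap logger and falls back to the capnslog-based plog when it is nil, which is how newLogUnaryInterceptor calls it here (lg is passed as nil). A sketch of that structured-versus-formatted split, assuming the standard library log package as the nil fallback instead of capnslog.

package main

import (
	"log"
	"time"

	"go.uber.org/zap"
)

// logStats mirrors the shape of logGenericRequestStats: one structured path
// when a zap logger is available, one formatted fallback when it is nil.
func logStats(lg *zap.Logger, start time.Time, remote, method string, reqSize, respSize int) {
	d := time.Since(start)
	if lg == nil {
		log.Printf("start=%v spent=%v remote=%s method=%s req=%dB resp=%dB",
			start, d, remote, method, reqSize, respSize)
		return
	}
	lg.Debug("request stats",
		zap.Time("start time", start),
		zap.Duration("time spent", d),
		zap.String("remote", remote),
		zap.String("response type", method),
		zap.Int("request size", reqSize),
		zap.Int("response size", respSize),
	)
}

func main() {
	lg, _ := zap.NewDevelopment() // development config logs at Debug level
	defer lg.Sync()
	logStats(lg, time.Now(), "127.0.0.1:58342", "/etcdserverpb.KV/Range", 24, 512)
	logStats(nil, time.Now(), "127.0.0.1:58342", "/etcdserverpb.KV/Range", 24, 512)
}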
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go (generated, vendored) | 6

@@ -59,7 +59,7 @@ func (as *InternalRaftStringer) String() string {
 	case as.Request.Put != nil:
 		return fmt.Sprintf("header:<%s> put:<%s>",
 			as.Request.Header.String(),
-			newLoggablePutRequest(as.Request.Put).String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
 		)
 	case as.Request.Txn != nil:
 		return fmt.Sprintf("header:<%s> txn:<%s>",
@@ -121,7 +121,7 @@ func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
 func (as *requestOpStringer) String() string {
 	switch op := as.Op.Request.(type) {
 	case *RequestOp_RequestPut:
-		return fmt.Sprintf("request_put:<%s>", newLoggablePutRequest(op.RequestPut).String())
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
 	case *RequestOp_RequestTxn:
 		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
 	default:
@@ -167,7 +167,7 @@ type loggablePutRequest struct {
 	IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
 }
 
-func newLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
 	return &loggablePutRequest{
 		request.Key,
 		len(request.Value),
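Note: newLoggablePutRequest becomes exported as NewLoggablePutRequest so the new interceptor logging in v3rpc can stringify put requests from another package. The wrapper records len(Value) in place of the value itself, which is the redaction the "see PR #9821" comments refer to. A self-contained sketch of that idea; the type and field names below are invented for illustration.

package main

import "fmt"

type PutRequest struct {
	Key   []byte
	Value []byte
}

// loggablePut carries the value's size instead of its contents, so logging
// a request can never leak the stored value.
type loggablePut struct {
	Key       []byte
	ValueSize int
}

func NewLoggablePut(r *PutRequest) *loggablePut {
	return &loggablePut{Key: r.Key, ValueSize: len(r.Value)}
}

func (p *loggablePut) String() string {
	return fmt.Sprintf("key:%q value_size:%d", p.Key, p.ValueSize)
}

func main() {
	req := &PutRequest{Key: []byte("secret-name"), Value: []byte("hunter2")}
	fmt.Println(NewLoggablePut(req)) // key:"secret-name" value_size:7
}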
vendor/github.com/coreos/etcd/etcdserver/metrics.go (generated, vendored) | 15

@@ -90,6 +90,12 @@ var (
 		Name:      "slow_read_indexes_total",
 		Help:      "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
 	})
+	readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "read_indexes_failed_total",
+		Help:      "The total number of failed read indexes seen.",
+	})
 	quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
 		Namespace: "etcd",
 		Subsystem: "server",
@@ -110,6 +116,13 @@ var (
 		Help: "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
 	},
 		[]string{"server_go_version"})
+	serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "server",
+		Name:      "id",
+		Help:      "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
+	},
+		[]string{"server_id"})
 )
 
 func init() {
@@ -124,9 +137,11 @@ func init() {
 	prometheus.MustRegister(proposalsFailed)
 	prometheus.MustRegister(leaseExpired)
 	prometheus.MustRegister(slowReadIndex)
+	prometheus.MustRegister(readIndexFailed)
 	prometheus.MustRegister(quotaBackendBytes)
 	prometheus.MustRegister(currentVersion)
 	prometheus.MustRegister(currentGoVersion)
+	prometheus.MustRegister(serverID)
 
 	currentVersion.With(prometheus.Labels{
 		"server_version": version.Version,
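Note: serverID follows the Prometheus "info metric" convention already used by currentVersion and currentGoVersion: the gauge is a constant 1 and the interesting value travels in a label. A sketch of that pattern; the demo namespace and the sample ID are made up.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// Info-style metric: the gauge value is always 1; the payload (the ID)
// lives in the label, matching the serverID gauge above.
var serverInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "demo",
	Subsystem: "server",
	Name:      "id",
	Help:      "Server ID in hexadecimal format. 1 for 'server_id' label with current ID.",
}, []string{"server_id"})

func main() {
	prometheus.MustRegister(serverInfo)
	g := serverInfo.With(prometheus.Labels{"server_id": "8e9e05c52164694d"})
	g.Set(1)

	// Read the sample back to show the label/value split.
	m := &dto.Metric{}
	_ = g.Write(m)
	fmt.Println(m.GetLabel()[0].GetValue(), m.GetGauge().GetValue()) // 8e9e05c52164694d 1
}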
vendor/github.com/coreos/etcd/etcdserver/server.go (generated, vendored) | 2

@@ -59,6 +59,7 @@ import (
 
 	"github.com/coreos/go-semver/semver"
 	"github.com/coreos/pkg/capnslog"
+	"github.com/prometheus/client_golang/prometheus"
 )
 
 const (
@@ -435,6 +436,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
 		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
 		forceVersionC: make(chan struct{}),
 	}
+	serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
 
 	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 
vendor/github.com/coreos/etcd/etcdserver/v3_server.go (generated, vendored) | 3

@@ -634,6 +634,7 @@ func (s *EtcdServer) linearizableReadLoop() {
 				return
 			}
 			plog.Errorf("failed to get read index from raft: %v", err)
+			readIndexFailed.Inc()
 			nr.notify(err)
 			continue
 		}
@@ -659,7 +660,7 @@ func (s *EtcdServer) linearizableReadLoop() {
 			}

 		case <-time.After(s.Cfg.ReqTimeout()):
-			plog.Warningf("timed out waiting for read index response")
+			plog.Warningf("timed out waiting for read index response (local node might have slow network)")
 			nr.notify(ErrTimeout)
 			timeout = true
 			slowReadIndex.Inc()
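Note: the read loop now counts hard failures (readIndexFailed) separately from timeouts (slowReadIndex), and the reworded warning names a slow local network as a likely cause of the timeout path. A sketch of the same wait-or-timeout shape, reduced to a select on time.After; the function and error names are placeholders.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("request timed out")

// await mirrors the linearizableReadLoop shape: wait for a response on one
// channel or give up after a request timeout, reporting each failure mode.
func await(done <-chan struct{}, timeout time.Duration) error {
	select {
	case <-done:
		return nil
	case <-time.After(timeout):
		// Name a likely cause in the warning, as the reworded message does.
		fmt.Println("timed out waiting for response (local node might have slow network)")
		return errTimeout
	}
}

func main() {
	done := make(chan struct{})
	go func() { time.Sleep(50 * time.Millisecond); close(done) }()
	fmt.Println(await(done, time.Second))                      // <nil>
	fmt.Println(await(make(chan struct{}), time.Millisecond)) // request timed out
}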