Update etcd client to 3.3.9
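Bumps the vendored github.com/coreos/etcd integration test packages to the 3.3.9 client. Highlights visible in the diff below: the code moves from golang.org/x/net/context to the standard library context package; peer listeners are multiplexed with github.com/soheilhy/cmux so a single port serves both rafthttp and peer gRPC (new grpcServerPeer); the member struct replaces the hss server list with serverClosers; electionTimeout is exported as ElectionTimeout and now multiplies ElectionTicks by TickMs; ClusterConfig/memberConfig gain MaxTxnOps and members enable InitialCorruptCheck; etcdserver.NewServer now takes the ServerConfig by value; and the gRPC proxy serves Auth through grpcproxy.NewAuthProxy instead of pb.NewAuthClient.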
vendor/github.com/coreos/etcd/integration/BUILD (3 changes, generated, vendored)
@@ -31,8 +31,9 @@ go_library(
         "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
         "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
         "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
+        "//vendor/github.com/soheilhy/cmux:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
+        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
         "//vendor/google.golang.org/grpc/keepalive:go_default_library",
     ],
 )
vendor/github.com/coreos/etcd/integration/bridge.go (2 changes, generated, vendored)
@@ -224,5 +224,5 @@ func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error
 			break
 		}
 	}
-	return
+	return err
 }
vendor/github.com/coreos/etcd/integration/cluster.go (167 changes, generated, vendored)
@@ -15,6 +15,7 @@
 package integration
 
 import (
+	"context"
 	"crypto/tls"
 	"fmt"
 	"io/ioutil"
@@ -51,16 +52,19 @@ import (
 	"github.com/coreos/etcd/rafthttp"
 
 	"github.com/coreos/pkg/capnslog"
-	"golang.org/x/net/context"
+	"github.com/soheilhy/cmux"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/keepalive"
 )
 
 const (
-	tickDuration   = 10 * time.Millisecond
-	clusterName    = "etcd"
-	requestTimeout = 20 * time.Second
+	// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
+	RequestWaitTimeout = 3 * time.Second
+	tickDuration       = 10 * time.Millisecond
+	requestTimeout     = 20 * time.Second
 
+	clusterName  = "etcd"
 	basePort     = 21000
 	UrlScheme    = "unix"
 	UrlSchemeTLS = "unixs"
@@ -97,6 +101,7 @@ type ClusterConfig struct {
 	DiscoveryURL          string
 	UseGRPC               bool
 	QuotaBackendBytes     int64
+	MaxTxnOps             uint
 	MaxRequestBytes       uint
 	GRPCKeepAliveMinTime  time.Duration
 	GRPCKeepAliveInterval time.Duration
@@ -237,6 +242,7 @@ func (c *cluster) mustNewMember(t *testing.T) *member {
 			peerTLS:               c.cfg.PeerTLS,
 			clientTLS:             c.cfg.ClientTLS,
 			quotaBackendBytes:     c.cfg.QuotaBackendBytes,
+			maxTxnOps:             c.cfg.MaxTxnOps,
 			maxRequestBytes:       c.cfg.MaxRequestBytes,
 			grpcKeepAliveMinTime:  c.cfg.GRPCKeepAliveMinTime,
 			grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
@@ -289,10 +295,11 @@ func (c *cluster) addMemberByURL(t *testing.T, clientURL, peerURL string) error
 	cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS)
 	ma := client.NewMembersAPI(cc)
 	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
-	if _, err := ma.Add(ctx, peerURL); err != nil {
+	_, err := ma.Add(ctx, peerURL)
+	cancel()
+	if err != nil {
 		return err
 	}
-	cancel()
 
 	// wait for the add node entry applied in the cluster
 	members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
@@ -315,10 +322,11 @@ func (c *cluster) removeMember(t *testing.T, id uint64) error {
 	cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
 	ma := client.NewMembersAPI(cc)
 	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
-	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
+	err := ma.Remove(ctx, types.ID(id).String())
+	cancel()
+	if err != nil {
 		return err
 	}
-	cancel()
 	newMembers := make([]*member, 0)
 	for _, m := range c.Members {
 		if uint64(m.s.ID()) != id {
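Aside (not part of the diff): the two hunks above reshape the same pattern. The old code returned early on error and only reached cancel() on the success path, holding the timeout context's timer and resources until the deadline fired. A minimal self-contained illustration, with a hypothetical doRequest standing in for the members-API call:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    // doRequest is a hypothetical stand-in for ma.Add / ma.Remove above.
    func doRequest(ctx context.Context) error { return errors.New("boom") }

    func callWithTimeout() error {
    	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
    	err := doRequest(ctx)
    	cancel() // runs on every path; the old shape skipped it on an early return
    	if err != nil {
    		return err
    	}
    	return nil
    }

    func main() { fmt.Println(callWithTimeout()) }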
@@ -488,12 +496,13 @@ type member struct {
 	// ClientTLSInfo enables client TLS when set
 	ClientTLSInfo *transport.TLSInfo
 
-	raftHandler *testutil.PauseableHandler
-	s           *etcdserver.EtcdServer
-	hss         []*httptest.Server
+	raftHandler   *testutil.PauseableHandler
+	s             *etcdserver.EtcdServer
+	serverClosers []func()
 
 	grpcServerOpts []grpc.ServerOption
 	grpcServer     *grpc.Server
+	grpcServerPeer *grpc.Server
 	grpcAddr       string
 	grpcBridge     *bridge
 
@@ -512,6 +521,7 @@ type memberConfig struct {
 	peerTLS               *transport.TLSInfo
 	clientTLS             *transport.TLSInfo
 	quotaBackendBytes     int64
+	maxTxnOps             uint
 	maxRequestBytes       uint
 	grpcKeepAliveMinTime  time.Duration
 	grpcKeepAliveInterval time.Duration
@@ -566,6 +576,10 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 	m.InitialElectionTickAdvance = true
 	m.TickMs = uint(tickDuration / time.Millisecond)
 	m.QuotaBackendBytes = mcfg.quotaBackendBytes
+	m.MaxTxnOps = mcfg.maxTxnOps
+	if m.MaxTxnOps == 0 {
+		m.MaxTxnOps = embed.DefaultMaxTxnOps
+	}
 	m.MaxRequestBytes = mcfg.maxRequestBytes
 	if m.MaxRequestBytes == 0 {
 		m.MaxRequestBytes = embed.DefaultMaxRequestBytes
@@ -586,10 +600,11 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 			Timeout: mcfg.grpcKeepAliveTimeout,
 		}))
 	}
 
 	m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
 	m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
 
+	m.InitialCorruptCheck = true
+
 	return m
 }
 
@@ -611,10 +626,12 @@ func (m *member) listenGRPC() error {
 	return nil
 }
 
-func (m *member) electionTimeout() time.Duration {
-	return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond
+func (m *member) ElectionTimeout() time.Duration {
+	return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond
 }
 
+func (m *member) ID() types.ID { return m.s.ID() }
+
 func (m *member) DropConnections() { m.grpcBridge.Reset() }
 func (m *member) PauseConnections() { m.grpcBridge.Pause() }
 func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() }
@@ -681,35 +698,86 @@ func (m *member) Clone(t *testing.T) *member {
 func (m *member) Launch() error {
 	plog.Printf("launching %s (%s)", m.Name, m.grpcAddr)
 	var err error
-	if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
+	if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil {
 		return fmt.Errorf("failed to initialize the etcd server: %v", err)
 	}
 	m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
 	m.s.Start()
+
+	var peerTLScfg *tls.Config
+	if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() {
+		if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil {
+			return err
+		}
+	}
+
+	if m.grpcListener != nil {
+		var (
+			tlscfg *tls.Config
+		)
+		if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
+			tlscfg, err = m.ClientTLSInfo.ServerConfig()
+			if err != nil {
+				return err
+			}
+		}
+		m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
+		m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg)
+		m.serverClient = v3client.New(m.s)
+		lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
+		epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
+		go m.grpcServer.Serve(m.grpcListener)
+	}
+
 	m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
+
+	h := (http.Handler)(m.raftHandler)
+	if m.grpcListener != nil {
+		h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+				m.grpcServerPeer.ServeHTTP(w, r)
+			} else {
+				m.raftHandler.ServeHTTP(w, r)
+			}
+		})
+	}
+
 	for _, ln := range m.PeerListeners {
+		cm := cmux.New(ln)
+		// don't hang on matcher after closing listener
+		cm.SetReadTimeout(time.Second)
+
+		if m.grpcServer != nil {
+			grpcl := cm.Match(cmux.HTTP2())
+			go m.grpcServerPeer.Serve(grpcl)
+		}
+
+		// serve http1/http2 rafthttp/grpc
+		ll := cm.Match(cmux.Any())
+		if peerTLScfg != nil {
+			if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil {
+				return err
+			}
+		}
 		hs := &httptest.Server{
-			Listener: ln,
-			Config:   &http.Server{Handler: m.raftHandler},
+			Listener: ll,
+			Config:   &http.Server{Handler: h, TLSConfig: peerTLScfg},
+			TLS:      peerTLScfg,
 		}
-		if m.PeerTLSInfo == nil {
-			hs.Start()
-		} else {
-			info := m.PeerTLSInfo
-			hs.TLS, err = info.ServerConfig()
-			if err != nil {
-				return err
-			}
-			tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
-			if err != nil {
-				return err
-			}
-			hs.TLS.Certificates = []tls.Certificate{*tlsCert}
-			hs.StartTLS()
+		hs.Start()
+
+		donec := make(chan struct{})
+		go func() {
+			defer close(donec)
+			cm.Serve()
+		}()
+		closer := func() {
+			ll.Close()
+			hs.CloseClientConnections()
+			hs.Close()
+			<-donec
 		}
-		m.hss = append(m.hss, hs)
+		m.serverClosers = append(m.serverClosers, closer)
 	}
 	for _, ln := range m.ClientListeners {
 		hs := &httptest.Server{
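Aside (not part of the diff): the rewritten Launch() above multiplexes each peer listener with cmux so one port carries both HTTP/1 rafthttp and HTTP/2 gRPC traffic. A minimal standalone sketch of that pattern, using only cmux calls that appear in the hunk (cmux.New, Match, HTTP2, Any, Serve); the server names are illustrative:

    package main

    import (
    	"fmt"
    	"net"
    	"net/http"

    	"github.com/soheilhy/cmux"
    	"google.golang.org/grpc"
    )

    func main() {
    	// One TCP listener serves two protocols, dispatched by matchers.
    	ln, err := net.Listen("tcp", "127.0.0.1:0")
    	if err != nil {
    		panic(err)
    	}
    	cm := cmux.New(ln)
    	grpcl := cm.Match(cmux.HTTP2()) // HTTP/2 connections go to the gRPC server
    	httpl := cm.Match(cmux.Any())   // everything else goes to the HTTP/1 server

    	gs := grpc.NewServer()
    	go gs.Serve(grpcl)
    	go http.Serve(httpl, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprintln(w, "ok")
    	}))

    	cm.Serve() // blocks, routing each accepted connection to its matcher
    }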
@@ -760,23 +828,12 @@ func (m *member) Launch() error {
 
 			hs.StartTLS()
 		}
-		m.hss = append(m.hss, hs)
-	}
-	if m.grpcListener != nil {
-		var (
-			tlscfg *tls.Config
-		)
-		if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
-			tlscfg, err = m.ClientTLSInfo.ServerConfig()
-			if err != nil {
-				return err
-			}
+		closer := func() {
+			ln.Close()
+			hs.CloseClientConnections()
+			hs.Close()
 		}
-		m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
-		m.serverClient = v3client.New(m.s)
-		lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
-		epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
-		go m.grpcServer.Serve(m.grpcListener)
+		m.serverClosers = append(m.serverClosers, closer)
 	}
 
 	plog.Printf("launched %s (%s)", m.Name, m.grpcAddr)
@@ -824,13 +881,16 @@ func (m *member) Close() {
 		m.serverClient = nil
 	}
 	if m.grpcServer != nil {
 		m.grpcServer.Stop()
+		m.grpcServer.GracefulStop()
 		m.grpcServer = nil
+		m.grpcServerPeer.Stop()
+		m.grpcServerPeer.GracefulStop()
+		m.grpcServerPeer = nil
 	}
 	m.s.HardStop()
-	for _, hs := range m.hss {
-		hs.CloseClientConnections()
-		hs.Close()
+	for _, f := range m.serverClosers {
+		f()
 	}
 }
 
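Aside (not part of the diff): in the Close() hunk above, grpc's Stop() immediately closes the listeners and open connections, and the GracefulStop() that follows then waits for handler goroutines to return. Presumably the ordering matters because GracefulStop() alone can block indefinitely on long-lived streams such as watches.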
@@ -838,7 +898,7 @@ func (m *member) Close() {
 func (m *member) Stop(t *testing.T) {
 	plog.Printf("stopping %s (%s)", m.Name, m.grpcAddr)
 	m.Close()
-	m.hss = nil
+	m.serverClosers = nil
 	plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
 }
 
@@ -976,6 +1036,9 @@ type ClusterV3 struct {
 // for each cluster member.
 func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
 	cfg.UseGRPC = true
+	if os.Getenv("CLIENT_DEBUG") != "" {
+		clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4))
+	}
 	clus := &ClusterV3{
 		cluster: NewClusterByConfig(t, cfg),
 	}
vendor/github.com/coreos/etcd/integration/cluster_proxy.go (4 changes, generated, vendored)
@@ -21,7 +21,6 @@ import (
 
 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/clientv3/namespace"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/proxy/grpcproxy"
 	"github.com/coreos/etcd/proxy/grpcproxy/adapter"
 )
@@ -58,6 +57,7 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 	lp, lpch := grpcproxy.NewLeaseProxy(c)
 	mp := grpcproxy.NewMaintenanceProxy(c)
 	clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs
+	authp := grpcproxy.NewAuthProxy(c)
 	lockp := grpcproxy.NewLockProxy(c)
 	electp := grpcproxy.NewElectionProxy(c)
 
@@ -67,7 +67,7 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 		adapter.LeaseServerToLeaseClient(lp),
 		adapter.WatchServerToWatchClient(wp),
 		adapter.MaintenanceServerToMaintenanceClient(mp),
-		pb.NewAuthClient(c.ActiveConnection()),
+		adapter.AuthServerToAuthClient(authp),
 		adapter.LockServerToLockClient(lockp),
 		adapter.ElectionServerToElectionClient(electp),
 	}