Version bump to etcd v3.2.11

Joe Betz
2017-12-12 16:20:42 -08:00
parent 4956e65d59
commit 05afd248f2
287 changed files with 25980 additions and 5220 deletions

View File

@@ -13,9 +13,15 @@ go_library(
deps = [
"//vendor/github.com/coreos/etcd/client:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/embed:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library",
@@ -25,6 +31,7 @@ go_library(
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
],
)

View File

@@ -17,6 +17,7 @@ package integration
import (
"fmt"
"io"
"io/ioutil"
"net"
"sync"
@@ -31,9 +32,10 @@ type bridge struct {
l net.Listener
conns map[*bridgeConn]struct{}
stopc chan struct{}
pausec chan struct{}
wg sync.WaitGroup
stopc chan struct{}
pausec chan struct{}
blackholec chan struct{}
wg sync.WaitGroup
mu sync.Mutex
}
@@ -41,11 +43,12 @@ type bridge struct {
func newBridge(addr string) (*bridge, error) {
b := &bridge{
// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
inaddr: addr + "0",
outaddr: addr,
conns: make(map[*bridgeConn]struct{}),
stopc: make(chan struct{}),
pausec: make(chan struct{}),
inaddr: addr + "0",
outaddr: addr,
conns: make(map[*bridgeConn]struct{}),
stopc: make(chan struct{}),
pausec: make(chan struct{}),
blackholec: make(chan struct{}),
}
close(b.pausec)
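
Closing pausec right away is the standard closed-channel idiom, so an unpaused bridge never blocks waiting on it. A tiny hedged illustration, separate from the diff (Pause presumably swaps in a fresh open channel so the same receive blocks until Unpause closes it again):

ch := make(chan struct{})
close(ch)
<-ch // a receive on a closed channel returns immediately, so selects fall straight through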
@@ -152,12 +155,12 @@ func (b *bridge) serveConn(bc *bridgeConn) {
var wg sync.WaitGroup
wg.Add(2)
go func() {
io.Copy(bc.out, bc.in)
b.ioCopy(bc, bc.out, bc.in)
bc.close()
wg.Done()
}()
go func() {
io.Copy(bc.in, bc.out)
b.ioCopy(bc, bc.in, bc.out)
bc.close()
wg.Done()
}()
@@ -179,3 +182,47 @@ func (bc *bridgeConn) close() {
bc.in.Close()
bc.out.Close()
}
func (b *bridge) Blackhole() {
b.mu.Lock()
close(b.blackholec)
b.mu.Unlock()
}
func (b *bridge) Unblackhole() {
b.mu.Lock()
for bc := range b.conns {
bc.Close()
}
b.conns = make(map[*bridgeConn]struct{})
b.blackholec = make(chan struct{})
b.mu.Unlock()
}
// ref. https://github.com/golang/go/blob/master/src/io/io.go copyBuffer
func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error) {
buf := make([]byte, 32*1024)
for {
select {
case <-b.blackholec:
io.Copy(ioutil.Discard, src)
return nil
default:
}
nr, er := src.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if ew != nil {
return ew
}
if nr != nw {
return io.ErrShortWrite
}
}
if er != nil {
err = er
break
}
}
return
}
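
For context, a hedged sketch of how a test might drive the new blackhole hooks. It relies on the member-level Blackhole/Unblackhole wrappers added in cluster.go below; the test name, key, value, and timeout are illustrative, not part of this commit:

package integration

import (
	"testing"
	"time"

	"golang.org/x/net/context"
)

// TestBlackholeSketch is illustrative only: it exercises the bridge
// blackhole added above by dropping all traffic to one member.
func TestBlackholeSketch(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[0].Blackhole() // ioCopy now discards every byte crossing the bridge

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err := clus.Client(0).Put(ctx, "foo", "bar")
	cancel()
	if err == nil {
		t.Fatal("expected Put to fail while the member is blackholed")
	}

	clus.Members[0].Unblackhole() // closes stale conns; fresh ones flow again
}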

View File

@@ -31,21 +31,28 @@ import (
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/etcdserver/api/v3client"
"github.com/coreos/etcd/etcdserver/api/v3election"
epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
"github.com/coreos/etcd/etcdserver/api/v3lock"
lockpb "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/pkg/capnslog"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
const (
@@ -72,16 +79,29 @@ var (
ClientCertAuth: true,
}
testTLSInfoExpired = transport.TLSInfo{
KeyFile: "./fixtures-expired/server-key.pem",
CertFile: "./fixtures-expired/server.pem",
TrustedCAFile: "./fixtures-expired/etcd-root-ca.pem",
ClientCertAuth: true,
}
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration")
)
type ClusterConfig struct {
Size int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
DiscoveryURL string
UseGRPC bool
QuotaBackendBytes int64
Size int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
DiscoveryURL string
UseGRPC bool
QuotaBackendBytes int64
MaxRequestBytes uint
GRPCKeepAliveMinTime time.Duration
GRPCKeepAliveInterval time.Duration
GRPCKeepAliveTimeout time.Duration
// SkipCreatingClient, when set, skips creating clients for each member.
SkipCreatingClient bool
}
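
A hedged sketch of the new ClusterConfig knobs in use together (the values are arbitrary and t is a surrounding *testing.T):

cfg := &ClusterConfig{
	Size:                  3,
	GRPCKeepAliveMinTime:  time.Second,     // server rejects client pings arriving faster than this
	GRPCKeepAliveInterval: 5 * time.Second, // server pings idle connections at this rate
	GRPCKeepAliveTimeout:  time.Second,     // unanswered pings close the connection
	SkipCreatingClient:    true,            // the test constructs its own clientv3 clients
}
clus := NewClusterV3(t, cfg)
defer clus.Terminate(t)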
type cluster struct {
@@ -89,11 +109,6 @@ type cluster struct {
Members []*member
}
func init() {
// manually enable v3 capability since we know the cluster members all support v3.
api.EnableCapability(api.V3rpcCapability)
}
func schemeFromTLSInfo(tls *transport.TLSInfo) string {
if tls == nil {
return UrlScheme
@@ -175,8 +190,12 @@ func (c *cluster) URL(i int) string {
// URLs returns a list of all active client URLs in the cluster
func (c *cluster) URLs() []string {
return getMembersURLs(c.Members)
}
func getMembersURLs(members []*member) []string {
urls := make([]string, 0)
for _, m := range c.Members {
for _, m := range members {
select {
case <-m.s.StopNotify():
continue
@@ -210,10 +229,14 @@ func (c *cluster) HTTPMembers() []client.Member {
func (c *cluster) mustNewMember(t *testing.T) *member {
m := mustNewMember(t,
memberConfig{
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
maxRequestBytes: c.cfg.MaxRequestBytes,
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
})
m.DiscoveryURL = c.cfg.DiscoveryURL
if c.cfg.UseGRPC {
@@ -312,9 +335,15 @@ func (c *cluster) removeMember(t *testing.T, id uint64) error {
}
func (c *cluster) Terminate(t *testing.T) {
var wg sync.WaitGroup
wg.Add(len(c.Members))
for _, m := range c.Members {
m.Terminate(t)
go func(mm *member) {
defer wg.Done()
mm.Terminate(t)
}(m)
}
wg.Wait()
}
func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
@@ -331,7 +360,6 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
time.Sleep(tickDuration)
}
}
return
}
func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }
@@ -343,6 +371,18 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
for _, m := range membs {
possibleLead[uint64(m.s.ID())] = true
}
cc := MustNewHTTPClient(t, getMembersURLs(membs), nil)
kapi := client.NewKeysAPI(cc)
// ensure leader is up via linearizable get
for {
ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration)
_, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true})
cancel()
if err == nil || strings.Contains(err.Error(), "Key not found") {
break
}
}
for lead == 0 || !possibleLead[lead] {
lead = 0
@@ -446,9 +486,13 @@ type member struct {
s *etcdserver.EtcdServer
hss []*httptest.Server
grpcServer *grpc.Server
grpcAddr string
grpcBridge *bridge
grpcServerOpts []grpc.ServerOption
grpcServer *grpc.Server
grpcAddr string
grpcBridge *bridge
// serverClient is a clientv3 that directly calls the etcdserver.
serverClient *clientv3.Client
keepDataDirTerminate bool
}
@@ -456,10 +500,14 @@ type member struct {
func (m *member) GRPCAddr() string { return m.grpcAddr }
type memberConfig struct {
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
maxRequestBytes uint
grpcKeepAliveMinTime time.Duration
grpcKeepAliveInterval time.Duration
grpcKeepAliveTimeout time.Duration
}
// mustNewMember returns an initialized member with the given name. If peerTLS is
@@ -507,6 +555,26 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
m.ElectionTicks = electionTicks
m.TickMs = uint(tickDuration / time.Millisecond)
m.QuotaBackendBytes = mcfg.quotaBackendBytes
m.MaxRequestBytes = mcfg.maxRequestBytes
if m.MaxRequestBytes == 0 {
m.MaxRequestBytes = embed.DefaultMaxRequestBytes
}
m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough
m.grpcServerOpts = []grpc.ServerOption{}
if mcfg.grpcKeepAliveMinTime > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: mcfg.grpcKeepAliveMinTime,
PermitWithoutStream: false,
}))
}
if mcfg.grpcKeepAliveInterval > time.Duration(0) &&
mcfg.grpcKeepAliveTimeout > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
Time: mcfg.grpcKeepAliveInterval,
Timeout: mcfg.grpcKeepAliveTimeout,
}))
}
return m
}
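
The options above only configure the server side. A minimal sketch of the matching client dial options, assuming grpc-go's keepalive package; the function name, address, and durations are placeholders, not part of this commit:

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// dialWithKeepalive is a sketch: the client-side counterpart to the
// EnforcementPolicy configured above. addr stands in for a member's
// gRPC address.
func dialWithKeepalive(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr,
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			// Time must be >= the server's MinTime, or the server answers the
			// pings with GOAWAY (ENHANCE_YOUR_CALM) and drops the connection.
			Time:    2 * time.Second,
			Timeout: time.Second, // give up if a ping ack takes longer than this
		}),
	)
}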
@@ -523,7 +591,7 @@ func (m *member) listenGRPC() error {
l.Close()
return err
}
m.grpcAddr = m.grpcBridge.URL()
m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
m.grpcListener = l
return nil
}
@@ -535,6 +603,8 @@ func (m *member) electionTimeout() time.Duration {
func (m *member) DropConnections() { m.grpcBridge.Reset() }
func (m *member) PauseConnections() { m.grpcBridge.Pause() }
func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() }
func (m *member) Blackhole() { m.grpcBridge.Blackhole() }
func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() }
// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *member) (*clientv3.Client, error) {
@@ -597,10 +667,10 @@ func (m *member) Launch() error {
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.s.SyncTicker = time.Tick(500 * time.Millisecond)
m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{
@@ -644,7 +714,10 @@ func (m *member) Launch() error {
return err
}
}
m.grpcServer = v3rpc.Server(m.s, tlscfg)
m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
m.serverClient = v3client.New(m.s)
lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
go m.grpcServer.Serve(m.grpcListener)
}
@@ -688,8 +761,12 @@ func (m *member) Close() {
m.grpcBridge.Close()
m.grpcBridge = nil
}
if m.serverClient != nil {
m.serverClient.Close()
m.serverClient = nil
}
if m.grpcServer != nil {
m.grpcServer.Stop()
m.grpcServer.GracefulStop()
m.grpcServer = nil
}
m.s.HardStop()
@@ -785,7 +862,7 @@ func (m *member) Metric(metricName string) (string, error) {
}
// InjectPartition drops connections from m to others, and vice versa.
func (m *member) InjectPartition(t *testing.T, others []*member) {
func (m *member) InjectPartition(t *testing.T, others ...*member) {
for _, other := range others {
m.s.CutPeer(other.s.ID())
other.s.CutPeer(m.s.ID())
@@ -793,7 +870,7 @@ func (m *member) InjectPartition(t *testing.T, others []*member) {
}
// RecoverPartition recovers connections from m to others, and vice versa.
func (m *member) RecoverPartition(t *testing.T, others []*member) {
func (m *member) RecoverPartition(t *testing.T, others ...*member) {
for _, other := range others {
m.s.MendPeer(other.s.ID())
other.s.MendPeer(m.s.ID())
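
Call sites drop the slice literal under the new variadic signatures; a hedged sketch (clus and t come from a surrounding test):

lead := clus.Members[0]
lead.InjectPartition(t, clus.Members[1], clus.Members[2]) // previously: []*member{...}
// ... assert the remaining members elect a new leader ...
lead.RecoverPartition(t, clus.Members[1], clus.Members[2])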
@@ -845,12 +922,15 @@ func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
cluster: NewClusterByConfig(t, cfg),
}
clus.Launch(t)
for _, m := range clus.Members {
client, err := NewClientV3(m)
if err != nil {
t.Fatalf("cannot create client: %v", err)
if !cfg.SkipCreatingClient {
for _, m := range clus.Members {
client, err := NewClientV3(m)
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
clus.clients = append(clus.clients, client)
}
clus.clients = append(clus.clients, client)
}
return clus
@@ -897,4 +977,8 @@ type grpcAPI struct {
Maintenance pb.MaintenanceClient
// Auth is the authentication API for the client's connection.
Auth pb.AuthClient
// Lock is the lock API for the client's connection.
Lock lockpb.LockClient
// Election is the election API for the client's connection.
Election epb.ElectionClient
}
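
With Lock and Election now exposed on grpcAPI, a test can hit the v3lock service over the same connection. A hedged sketch, not from this commit: the lock name and lease TTL are arbitrary, and error handling is trimmed to t.Fatal:

api := toGRPC(clus.Client(0))
lresp, err := api.Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})
if err != nil {
	t.Fatal(err)
}
lock, err := api.Lock.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("a"), Lease: lresp.ID})
if err != nil {
	t.Fatal(err)
}
// lock.Key identifies this holder; unlocking with it releases ownership.
if _, err = api.Lock.Unlock(context.TODO(), &lockpb.UnlockRequest{Key: lock.Key}); err != nil {
	t.Fatal(err)
}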

View File

@@ -18,6 +18,8 @@ package integration
import (
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
@@ -29,6 +31,8 @@ func toGRPC(c *clientv3.Client) grpcAPI {
pb.NewWatchClient(c.ActiveConnection()),
pb.NewMaintenanceClient(c.ActiveConnection()),
pb.NewAuthClient(c.ActiveConnection()),
v3lockpb.NewLockClient(c.ActiveConnection()),
v3electionpb.NewElectionClient(c.ActiveConnection()),
}
}

View File

@@ -20,8 +20,10 @@ import (
"sync"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/namespace"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/proxy/grpcproxy"
"github.com/coreos/etcd/proxy/grpcproxy/adapter"
)
var (
@@ -29,10 +31,13 @@ var (
proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
)
const proxyNamespace = "proxy-namespace"
type grpcClientProxy struct {
grpc grpcAPI
wdonec <-chan struct{}
kvdonec <-chan struct{}
lpdonec <-chan struct{}
}
func toGRPC(c *clientv3.Client) grpcAPI {
@@ -43,17 +48,30 @@ func toGRPC(c *clientv3.Client) grpcAPI {
return v.grpc
}
wp, wpch := grpcproxy.NewWatchProxy(c)
// test namespacing proxy
c.KV = namespace.NewKV(c.KV, proxyNamespace)
c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace)
c.Lease = namespace.NewLease(c.Lease, proxyNamespace)
// test coalescing/caching proxy
kvp, kvpch := grpcproxy.NewKvProxy(c)
wp, wpch := grpcproxy.NewWatchProxy(c)
lp, lpch := grpcproxy.NewLeaseProxy(c)
mp := grpcproxy.NewMaintenanceProxy(c)
clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs
lockp := grpcproxy.NewLockProxy(c)
electp := grpcproxy.NewElectionProxy(c)
grpc := grpcAPI{
pb.NewClusterClient(c.ActiveConnection()),
grpcproxy.KvServerToKvClient(kvp),
pb.NewLeaseClient(c.ActiveConnection()),
grpcproxy.WatchServerToWatchClient(wp),
pb.NewMaintenanceClient(c.ActiveConnection()),
adapter.ClusterServerToClusterClient(clp),
adapter.KvServerToKvClient(kvp),
adapter.LeaseServerToLeaseClient(lp),
adapter.WatchServerToWatchClient(wp),
adapter.MaintenanceServerToMaintenanceClient(mp),
pb.NewAuthClient(c.ActiveConnection()),
adapter.LockServerToLockClient(lockp),
adapter.ElectionServerToElectionClient(electp),
}
proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch}
proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch}
return grpc
}
@@ -61,13 +79,17 @@ type proxyCloser struct {
clientv3.Watcher
wdonec <-chan struct{}
kvdonec <-chan struct{}
lclose func()
lpdonec <-chan struct{}
}
func (pc *proxyCloser) Close() error {
// client ctx is canceled before calling close, so kv will close out
// client ctx is canceled before calling close, so kv and lp will close out
<-pc.kvdonec
err := pc.Watcher.Close()
<-pc.wdonec
pc.lclose()
<-pc.lpdonec
return err
}
@@ -79,10 +101,14 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
rpc := toGRPC(c)
c.KV = clientv3.NewKVFromKVClient(rpc.KV)
pmu.Lock()
lc := c.Lease
c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, cfg.DialTimeout)
c.Watcher = &proxyCloser{
Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch),
wdonec: proxies[c].wdonec,
kvdonec: proxies[c].kvdonec,
lclose: func() { lc.Close() },
lpdonec: proxies[c].lpdonec,
}
pmu.Unlock()
return c, nil
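
A hedged sketch of what the namespace wrappers above give the proxy tests; cli is any *clientv3.Client and ctx a live context, both assumed for illustration:

kv := namespace.NewKV(cli.KV, proxyNamespace)
kv.Put(ctx, "abc", "v")       // stored on the server as "proxy-namespaceabc"
resp, _ := kv.Get(ctx, "abc") // the prefix is stripped again, so resp.Kvs[0].Key reads back as "abc"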