Level sets dependency graph to consume etcd 3.1.5
vendor/github.com/coreos/etcd/integration/bridge.go (generated, vendored; 3 lines changed)
@@ -39,7 +39,8 @@ type bridge struct {
 
 func newBridge(addr string) (*bridge, error) {
 	b := &bridge{
-		inaddr:  addr + ".bridge",
+		// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
+		inaddr:  addr + "0",
 		outaddr: addr,
 		conns:   make(map[*bridgeConn]struct{}),
 		stopc:   make(chan struct{}, 1),
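Note: the bridge listens on inaddr and forwards to the member's real outaddr, so the two must differ; appending a single digit keeps inaddr's port numeric under go1.8's stricter address parsing. A minimal standalone sketch of the scheme (addresses are illustrative, not taken from the diff):

package main

import "fmt"

func main() {
	// outaddr is the member's real address; its "port" is already the
	// digits-only "%05d%05d" pair produced by newLocalListener (below).
	outaddr := "127.0.0.1:2100112345"
	// Appending "0" yields a distinct, still-numeric port for the bridge.
	inaddr := outaddr + "0"
	fmt.Println(inaddr) // 127.0.0.1:21001123450
}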

vendor/github.com/coreos/etcd/integration/cluster.go (generated, vendored; 118 lines changed)
@@ -276,12 +276,18 @@ func (c *cluster) AddMember(t *testing.T) {
 }
 
 func (c *cluster) RemoveMember(t *testing.T, id uint64) {
+	if err := c.removeMember(t, id); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func (c *cluster) removeMember(t *testing.T, id uint64) error {
 	// send remove request to the cluster
 	cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
 	ma := client.NewMembersAPI(cc)
 	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
 	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
-		t.Fatalf("unexpected remove error %v", err)
+		return err
 	}
 	cancel()
 	newMembers := make([]*member, 0)
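The refactor splits the old fatal-on-error RemoveMember into a thin wrapper plus an error-returning removeMember, so callers can tolerate or retry a failed removal. A hypothetical sketch of a caller inside this package (the helper name is invented for illustration):

// testRemoveWithRetry is hypothetical: it retries removal once after a
// leader election instead of failing the test immediately.
func testRemoveWithRetry(t *testing.T, c *cluster, id uint64) {
	if err := c.removeMember(t, id); err != nil {
		c.WaitLeader(t) // assume the failure raced with a leader change
		if err := c.removeMember(t, id); err != nil {
			t.Fatal(err)
		}
	}
}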
@@ -302,6 +308,7 @@ func (c *cluster) RemoveMember(t *testing.T, id uint64) {
 	}
 	c.Members = newMembers
 	c.waitMembersMatch(t, c.HTTPMembers())
+	return nil
 }
 
 func (c *cluster) Terminate(t *testing.T) {
@@ -329,6 +336,7 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
 
 func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }
 
+// waitLeader waits until given members agree on the same leader.
 func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
 	possibleLead := make(map[uint64]bool)
 	var lead uint64
@@ -362,6 +370,28 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
 	return -1
 }
 
+func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) }
+
+// waitNoLeader waits until given members lose leader.
+func (c *cluster) waitNoLeader(t *testing.T, membs []*member) {
+	noLeader := false
+	for !noLeader {
+		noLeader = true
+		for _, m := range membs {
+			select {
+			case <-m.s.StopNotify():
+				continue
+			default:
+			}
+			if m.s.Lead() != 0 {
+				noLeader = false
+				time.Sleep(10 * tickDuration)
+				break
+			}
+		}
+	}
+}
+
 func (c *cluster) waitVersion() {
 	for _, m := range c.Members {
 		for {
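WaitNoLeader is the counterpart of WaitLeader: it polls every member that is still running (members whose StopNotify channel is closed are skipped) until none of them reports a leader. A hypothetical in-package sketch of its use, assuming the package's NewCluster/Launch helpers:

// testQuorumLoss is hypothetical: stopping two of three members loses
// quorum, so the survivor must eventually report no leader.
func testQuorumLoss(t *testing.T) {
	c := NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	c.WaitLeader(t)
	c.Members[0].Stop(t)
	c.Members[1].Stop(t)
	c.WaitNoLeader(t) // the remaining member cannot elect itself
}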
@@ -374,7 +404,7 @@ func (c *cluster) waitVersion() {
 }
 
 func (c *cluster) name(i int) string {
-	return fmt.Sprint("node", i)
+	return fmt.Sprint(i)
 }
 
 // isMembersEqual checks whether two members equal except ID field.
@@ -390,7 +420,8 @@ func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
 
 func newLocalListener(t *testing.T) net.Listener {
 	c := atomic.AddInt64(&localListenCount, 1)
-	addr := fmt.Sprintf("127.0.0.1:%d.%d.sock", c+basePort, os.Getpid())
+	// Go 1.8+ allows only numbers in port
+	addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
 	return NewListenerWithAddr(t, addr)
 }
 
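This change and the bridge.go change above share one rationale: go1.8's net package requires the port component of an address to be numeric, so the old socket-style names ("%d.%d.sock") are replaced by zero-padded digit pairs. A standalone sketch of the encoding (constants are stand-ins for the package's unexported values):

package main

import (
	"fmt"
	"os"
)

func main() {
	const basePort = 21000 // stand-in for the package's basePort
	count := int64(1)      // stand-in for the atomic localListenCount
	// Zero-padded listener count plus pid pack into a unique numeric port.
	addr := fmt.Sprintf("127.0.0.1:%05d%05d", basePort+count, os.Getpid())
	fmt.Println(addr) // e.g. 127.0.0.1:2100104242
}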
@@ -418,6 +449,8 @@ type member struct {
 	grpcServer *grpc.Server
 	grpcAddr   string
 	grpcBridge *bridge
+
+	keepDataDirTerminate bool
 }
 
 func (m *member) GRPCAddr() string { return m.grpcAddr }
@@ -480,7 +513,7 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 // listenGRPC starts a grpc server over a unix domain socket on the member
 func (m *member) listenGRPC() error {
 	// prefix with localhost so cert has right domain
-	m.grpcAddr = "localhost:" + m.Name + ".sock"
+	m.grpcAddr = "localhost:" + m.Name
 	l, err := transport.NewUnixListener(m.grpcAddr)
 	if err != nil {
 		return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
@@ -495,6 +528,10 @@ func (m *member) listenGRPC() error {
 	return nil
 }
 
+func (m *member) electionTimeout() time.Duration {
+	return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond
+}
+
 func (m *member) DropConnections() { m.grpcBridge.Reset() }
 
 // NewClientV3 creates a new grpc client connection to the member
@@ -515,7 +552,7 @@ func NewClientV3(m *member) (*clientv3.Client, error) {
 		}
 		cfg.TLS = tls
 	}
-	return clientv3.New(cfg)
+	return newClientV3(cfg)
 }
 
 // Clone returns a member with the same server configuration. The returned
@@ -653,7 +690,7 @@ func (m *member) Close() {
 		m.grpcServer.Stop()
 		m.grpcServer = nil
 	}
-	m.s.Stop()
+	m.s.HardStop()
 	for _, hs := range m.hss {
 		hs.CloseClientConnections()
 		hs.Close()
@@ -668,6 +705,15 @@ func (m *member) Stop(t *testing.T) {
 	plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
 }
 
+// checkLeaderTransition waits for leader transition, returning the new leader ID.
+func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 {
+	interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
+	for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
+		time.Sleep(interval)
+	}
+	return m.s.Lead()
+}
+
 // StopNotify unblocks when a member stop completes
 func (m *member) StopNotify() <-chan struct{} {
 	return m.s.StopNotify()
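checkLeaderTransition busy-waits at tick granularity until the observed leader is both nonzero and different from oldLead, then returns the new leader's ID. A hypothetical in-package sketch (test name and flow are invented):

// testLeaderMoves is hypothetical: stop the current leader, then wait
// until another member observes a different, elected leader.
func testLeaderMoves(t *testing.T, c *cluster) {
	lead := c.WaitLeader(t)
	oldLeadID := uint64(c.Members[lead].s.ID())
	c.Members[lead].Stop(t)

	observer := c.Members[(lead+1)%len(c.Members)]
	newLeadID := checkLeaderTransition(t, observer, oldLeadID)
	if newLeadID == oldLeadID {
		t.Fatalf("leader did not change from %x", oldLeadID)
	}
}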
@@ -702,12 +748,56 @@ func (m *member) Restart(t *testing.T) error {
 func (m *member) Terminate(t *testing.T) {
 	plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr)
 	m.Close()
-	if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
-		t.Fatal(err)
+	if !m.keepDataDirTerminate {
+		if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
+			t.Fatal(err)
+		}
 	}
 	plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr)
 }
 
+// Metric gets the metric value for a member
+func (m *member) Metric(metricName string) (string, error) {
+	cfgtls := transport.TLSInfo{}
+	tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
+	if err != nil {
+		return "", err
+	}
+	cli := &http.Client{Transport: tr}
+	resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	b, rerr := ioutil.ReadAll(resp.Body)
+	if rerr != nil {
+		return "", rerr
+	}
+	lines := strings.Split(string(b), "\n")
+	for _, l := range lines {
+		if strings.HasPrefix(l, metricName) {
+			return strings.Split(l, " ")[1], nil
+		}
+	}
+	return "", nil
+}
+
+// InjectPartition drops connections from m to others, vice versa.
+func (m *member) InjectPartition(t *testing.T, others []*member) {
+	for _, other := range others {
+		m.s.CutPeer(other.s.ID())
+		other.s.CutPeer(m.s.ID())
+	}
+}
+
+// RecoverPartition recovers connections from m to others, vice versa.
+func (m *member) RecoverPartition(t *testing.T, others []*member) {
+	for _, other := range others {
+		m.s.MendPeer(other.s.ID())
+		other.s.MendPeer(m.s.ID())
+	}
+}
+
 func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client {
 	cfgtls := transport.TLSInfo{}
 	if tls != nil {
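Metric fetches the member's /metrics endpoint and returns the token after the first space on the first line whose name matches the given prefix; InjectPartition and RecoverPartition cut and mend the raft transport between one member and a set of others. A hypothetical in-package sketch combining them (the metric name is assumed, not taken from the diff):

// testPartitionLeader is hypothetical: isolate the leader, let the
// majority elect a new one, heal the partition, then read a metric.
func testPartitionLeader(t *testing.T, c *cluster) {
	lead := c.WaitLeader(t)
	others := make([]*member, 0, len(c.Members)-1)
	for i, m := range c.Members {
		if i != lead {
			others = append(others, m)
		}
	}
	c.Members[lead].InjectPartition(t, others)
	c.waitLeader(t, others) // a new leader emerges on the majority side
	c.Members[lead].RecoverPartition(t, others)

	v, err := c.Members[lead].Metric("etcd_server_proposals_committed_total")
	if err != nil || v == "" {
		t.Fatalf("metric read failed: %v %q", err, v)
	}
}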
@@ -803,14 +893,6 @@ type grpcAPI struct {
 	Watch pb.WatchClient
 	// Maintenance is the maintenance API for the client's connection.
 	Maintenance pb.MaintenanceClient
-}
-
-func toGRPC(c *clientv3.Client) grpcAPI {
-	return grpcAPI{
-		pb.NewClusterClient(c.ActiveConnection()),
-		pb.NewKVClient(c.ActiveConnection()),
-		pb.NewLeaseClient(c.ActiveConnection()),
-		pb.NewWatchClient(c.ActiveConnection()),
-		pb.NewMaintenanceClient(c.ActiveConnection()),
-	}
+	// Auth is the authentication API for the client's connection.
+	Auth pb.AuthClient
 }

vendor/github.com/coreos/etcd/integration/cluster_direct.go (generated, vendored; new file, 37 lines)
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !cluster_proxy
+
+package integration
+
+import (
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+func toGRPC(c *clientv3.Client) grpcAPI {
+	return grpcAPI{
+		pb.NewClusterClient(c.ActiveConnection()),
+		pb.NewKVClient(c.ActiveConnection()),
+		pb.NewLeaseClient(c.ActiveConnection()),
+		pb.NewWatchClient(c.ActiveConnection()),
+		pb.NewMaintenanceClient(c.ActiveConnection()),
+		pb.NewAuthClient(c.ActiveConnection()),
+	}
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+	return clientv3.New(cfg)
+}

vendor/github.com/coreos/etcd/integration/cluster_proxy.go (generated, vendored; new file, 89 lines)
@@ -0,0 +1,89 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cluster_proxy
+
+package integration
+
+import (
+	"sync"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/proxy/grpcproxy"
+)
+
+var (
+	pmu     sync.Mutex
+	proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
+)
+
+type grpcClientProxy struct {
+	grpc    grpcAPI
+	wdonec  <-chan struct{}
+	kvdonec <-chan struct{}
+}
+
+func toGRPC(c *clientv3.Client) grpcAPI {
+	pmu.Lock()
+	defer pmu.Unlock()
+
+	if v, ok := proxies[c]; ok {
+		return v.grpc
+	}
+
+	wp, wpch := grpcproxy.NewWatchProxy(c)
+	kvp, kvpch := grpcproxy.NewKvProxy(c)
+	grpc := grpcAPI{
+		pb.NewClusterClient(c.ActiveConnection()),
+		grpcproxy.KvServerToKvClient(kvp),
+		pb.NewLeaseClient(c.ActiveConnection()),
+		grpcproxy.WatchServerToWatchClient(wp),
+		pb.NewMaintenanceClient(c.ActiveConnection()),
+		pb.NewAuthClient(c.ActiveConnection()),
+	}
+	proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch}
+	return grpc
+}
+
+type proxyCloser struct {
+	clientv3.Watcher
+	wdonec  <-chan struct{}
+	kvdonec <-chan struct{}
+}
+
+func (pc *proxyCloser) Close() error {
+	// client ctx is canceled before calling close, so kv will close out
+	<-pc.kvdonec
+	err := pc.Watcher.Close()
+	<-pc.wdonec
+	return err
+}
+
+func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
+	c, err := clientv3.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	rpc := toGRPC(c)
+	c.KV = clientv3.NewKVFromKVClient(rpc.KV)
+	pmu.Lock()
+	c.Watcher = &proxyCloser{
+		Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch),
+		wdonec:  proxies[c].wdonec,
+		kvdonec: proxies[c].kvdonec,
+	}
+	pmu.Unlock()
+	return c, nil
+}
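Together, cluster_direct.go and cluster_proxy.go give the package two mutually exclusive definitions of toGRPC and newClientV3, chosen at compile time: the "+build !cluster_proxy" file compiles by default, while building with the cluster_proxy tag (for example, go test -tags cluster_proxy) swaps in the grpcproxy-backed versions so the same tests exercise the proxy path. A minimal standalone illustration of the pattern (file and package names invented):

// file: direct.go
// +build !cluster_proxy

package impl

func backend() string { return "direct" } // default build

// file: proxy.go
// +build cluster_proxy

package impl

func backend() string { return "proxy" } // selected by -tags cluster_proxy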