Update etcd Godeps to 3.0.17
53 vendor/github.com/coreos/etcd/clientv3/lease.go (generated, vendored)
@@ -143,9 +143,6 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
 		if isHaltErr(cctx, err) {
 			return nil, toErr(ctx, err)
 		}
-		if nerr := l.newStream(); nerr != nil {
-			return nil, nerr
-		}
 	}
 }

@@ -164,9 +161,6 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
 		if isHaltErr(ctx, err) {
 			return nil, toErr(ctx, err)
 		}
-		if nerr := l.newStream(); nerr != nil {
-			return nil, nerr
-		}
 	}
 }

@@ -213,10 +207,6 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
 		if isHaltErr(ctx, err) {
 			return nil, toErr(ctx, err)
 		}
-
-		if nerr := l.newStream(); nerr != nil {
-			return nil, nerr
-		}
 	}
 }

@@ -312,10 +302,23 @@ func (l *lessor) recvKeepAliveLoop() {

 // resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
 func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
-	if err := l.newStream(); err != nil {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
+	if err = toErr(sctx, err); err != nil {
+		cancel()
 		return nil, err
 	}
-	stream := l.getKeepAliveStream()
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.stream != nil && l.streamCancel != nil {
+		l.stream.CloseSend()
+		l.streamCancel()
+	}
+
+	l.streamCancel = cancel
+	l.stream = stream
+
 	go l.sendKeepAliveLoop(stream)
 	return stream, nil
 }
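Note on this hunk: resetRecv now inlines the stream replacement that newStream used to perform. It opens the replacement stream first, then swaps it in under the mutex, closing the old stream's send side and canceling its context. A generic sketch of that swap pattern, using illustrative types rather than the etcd API:

package sketch

import (
	"io"
	"sync"

	"golang.org/x/net/context"
)

// conn is an illustrative stand-in for the lessor's stream fields.
type conn struct {
	mu     sync.Mutex
	stream io.Closer          // stand-in for pb.Lease_LeaseKeepAliveClient
	cancel context.CancelFunc // cancels the current stream's context
}

// swap installs a new stream and tears the previous one down exactly once,
// mirroring the lock-protected replacement inside resetRecv.
func (c *conn) swap(s io.Closer, cancel context.CancelFunc) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.stream != nil && c.cancel != nil {
		c.stream.Close() // analogous to l.stream.CloseSend()
		c.cancel()       // release the old stream's context
	}
	c.stream, c.cancel = s, cancel
}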
@@ -411,32 +414,6 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
 	}
 }

-func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	return l.stream
-}
-
-func (l *lessor) newStream() error {
-	sctx, cancel := context.WithCancel(l.stopCtx)
-	stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
-	if err != nil {
-		cancel()
-		return toErr(sctx, err)
-	}
-
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	if l.stream != nil && l.streamCancel != nil {
-		l.stream.CloseSend()
-		l.streamCancel()
-	}
-
-	l.streamCancel = cancel
-	l.stream = stream
-	return nil
-}
-
 func (ka *keepAlive) Close() {
 	close(ka.donec)
 	for _, ch := range ka.chs {
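Taken together, the lease.go hunks stop the unary Grant/Revoke/KeepAliveOnce paths from recreating the keep-alive stream on failure and fold all stream management into resetRecv; the getKeepAliveStream and newStream helpers disappear. A minimal usage sketch of the lease API these hunks touch; the endpoint and TTL are illustrative assumptions, not taken from the diff:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Assumed endpoint; not part of the diff.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Grant a 10-second lease; a transient failure here now retries the
	// unary RPC without tearing down the shared keep-alive stream.
	gresp, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}

	// Refresh the lease once over the internally managed stream.
	if _, err := cli.KeepAliveOnce(ctx, gresp.ID); err != nil {
		log.Fatal(err)
	}
}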
5 vendor/github.com/coreos/etcd/clientv3/op.go (generated, vendored)
@@ -215,14 +215,15 @@ func WithPrefix() OpOption {
 	}
 }

-// WithRange specifies the range of 'Get' or 'Delete' requests.
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
 // For example, 'Get' requests with 'WithRange(end)' returns
 // the keys in the range [key, end).
+// endKey must be lexicographically greater than start key.
 func WithRange(endKey string) OpOption {
 	return func(op *Op) { op.end = []byte(endKey) }
 }

-// WithFromKey specifies the range of 'Get' or 'Delete' requests
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
 // to be equal or greater than the key in the argument.
 func WithFromKey() OpOption { return WithRange("\x00") }

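The op.go change is documentation only: the range options apply to Watch as well as Get and Delete, and WithRange's end-key precondition is now spelled out. A short sketch of watching an open-ended key range, assuming an already-connected *clientv3.Client named cli:

package sketch

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// watchFromKey watches every key at or after "foo". Per the diff,
// WithFromKey is WithRange("\x00"), so the range is unbounded above.
func watchFromKey(cli *clientv3.Client) {
	rch := cli.Watch(context.Background(), "foo", clientv3.WithFromKey())
	for wresp := range rch {
		for _, ev := range wresp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}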
97 vendor/github.com/coreos/etcd/clientv3/watch.go (generated, vendored)
@@ -125,8 +125,6 @@ type watchGrpcStream struct {
 	reqc chan *watchRequest
 	// respc receives data from the watch client
 	respc chan *pb.WatchResponse
-	// stopc is sent to the main goroutine to stop all processing
-	stopc chan struct{}
 	// donec closes to broadcast shutdown
 	donec chan struct{}
 	// errc transmits errors from grpc Recv to the watch stream reconn logic
@@ -204,7 +202,6 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {

 		respc:    make(chan *pb.WatchResponse),
 		reqc:     make(chan *watchRequest),
-		stopc:    make(chan struct{}),
 		donec:    make(chan struct{}),
 		errc:     make(chan error, 1),
 		closingc: make(chan *watcherStream),
@@ -300,7 +297,7 @@ func (w *watcher) Close() (err error) {
 }

 func (w *watchGrpcStream) Close() (err error) {
-	close(w.stopc)
+	w.cancel()
 	<-w.donec
 	select {
 	case err = <-w.errc:
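With stopc gone, canceling the stream's context becomes the single shutdown signal for watchGrpcStream. A schematic sketch of the underlying pattern (not etcd code): replacing a dedicated stop channel with context cancellation:

package main

import (
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	donec := make(chan struct{})

	// Worker loop, analogous to watchGrpcStream.run.
	go func() {
		defer close(donec)
		for {
			select {
			case <-ctx.Done(): // replaces `case <-w.stopc:`
				return
			case <-time.After(time.Second): // placeholder for real work
			}
		}
	}()

	cancel() // replaces close(w.stopc)
	<-donec  // wait for the worker to exit, as Close does with <-w.donec
}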
@@ -347,7 +344,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
 	// close subscriber's channel
 	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
 		go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
-	} else {
+	} else if ws.outc != nil {
 		close(ws.outc)
 	}
 	if ws.id != -1 {
@@ -472,7 +469,7 @@ func (w *watchGrpcStream) run() {
 			wc.Send(ws.initReq.toPB())
 		}
 		cancelSet = make(map[int64]struct{})
-	case <-w.stopc:
+	case <-w.ctx.Done():
 		return
 	case ws := <-w.closingc:
 		w.closeSubstream(ws)
@@ -597,6 +594,8 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
 			}
 			ws.initReq.rev = nextRev
+		case <-w.ctx.Done():
+			return
 		case <-ws.initReq.ctx.Done():
 			return
 		case <-resumec:
@@ -608,34 +607,78 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 }

 func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
-	// connect to grpc stream
-	wc, err := w.openWatchClient()
-	if err != nil {
-		return nil, v3rpc.Error(err)
-	}
-	// mark all substreams as resuming
-	if len(w.substreams)+len(w.resuming) > 0 {
-		close(w.resumec)
-		w.resumec = make(chan struct{})
-		w.joinSubstreams()
-		for _, ws := range w.substreams {
-			ws.id = -1
-			w.resuming = append(w.resuming, ws)
-		}
-		for _, ws := range w.resuming {
-			if ws == nil || ws.closing {
-				continue
-			}
-			ws.donec = make(chan struct{})
-			go w.serveSubstream(ws, w.resumec)
-		}
-	}
-	w.substreams = make(map[int64]*watcherStream)
-	// receive data from new grpc stream
-	go w.serveWatchClient(wc)
-	return wc, nil
+	// mark all substreams as resuming
+	close(w.resumec)
+	w.resumec = make(chan struct{})
+	w.joinSubstreams()
+	for _, ws := range w.substreams {
+		ws.id = -1
+		w.resuming = append(w.resuming, ws)
+	}
+	// strip out nils, if any
+	var resuming []*watcherStream
+	for _, ws := range w.resuming {
+		if ws != nil {
+			resuming = append(resuming, ws)
+		}
+	}
+	w.resuming = resuming
+	w.substreams = make(map[int64]*watcherStream)
+
+	// connect to grpc stream while accepting watcher cancelation
+	stopc := make(chan struct{})
+	donec := w.waitCancelSubstreams(stopc)
+	wc, err := w.openWatchClient()
+	close(stopc)
+	<-donec
+
+	// serve all non-closing streams, even if there's a client error
+	// so that the teardown path can shutdown the streams as expected.
+	for _, ws := range w.resuming {
+		if ws.closing {
+			continue
+		}
+		ws.donec = make(chan struct{})
+		go w.serveSubstream(ws, w.resumec)
+	}
+
+	if err != nil {
+		return nil, v3rpc.Error(err)
+	}
+
+	// receive data from new grpc stream
+	go w.serveWatchClient(wc)
+	return wc, nil
 }

+func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+	var wg sync.WaitGroup
+	wg.Add(len(w.resuming))
+	donec := make(chan struct{})
+	for i := range w.resuming {
+		go func(ws *watcherStream) {
+			defer wg.Done()
+			if ws.closing {
+				return
+			}
+			select {
+			case <-ws.initReq.ctx.Done():
+				// closed ws will be removed from resuming
+				ws.closing = true
+				close(ws.outc)
+				ws.outc = nil
+				go func() { w.closingc <- ws }()
+			case <-stopc:
+			}
+		}(w.resuming[i])
+	}
+	go func() {
+		defer close(donec)
+		wg.Wait()
+	}()
+	return donec
+}
+
 // joinSubstream waits for all substream goroutines to complete
 func (w *watchGrpcStream) joinSubstreams() {
 	for _, ws := range w.substreams {
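waitCancelSubstreams is a fan-in: one goroutine per resuming stream waits for either that stream's context to be canceled or a stop signal, and a WaitGroup funnels all of them into a single done channel, which newWatchClient drains after openWatchClient returns. A standalone sketch of the same shape; the names are illustrative:

package sketch

import (
	"sync"

	"golang.org/x/net/context"
)

// waitAll mirrors the shape of waitCancelSubstreams: one goroutine per
// pending context, funneled through a WaitGroup into one done channel.
func waitAll(ctxs []context.Context, stopc <-chan struct{}) <-chan struct{} {
	var wg sync.WaitGroup
	wg.Add(len(ctxs))
	donec := make(chan struct{})
	for _, ctx := range ctxs {
		go func(ctx context.Context) {
			defer wg.Done()
			select {
			case <-ctx.Done(): // this waiter was canceled mid-reconnect
			case <-stopc: // reconnect finished; stop waiting
			}
		}(ctx)
	}
	go func() {
		defer close(donec)
		wg.Wait() // all waiters accounted for
	}()
	return donec
}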
@@ -652,9 +695,9 @@ func (w *watchGrpcStream) joinSubstreams() {
 func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
 	for {
 		select {
-		case <-w.stopc:
+		case <-w.ctx.Done():
 			if err == nil {
-				return nil, context.Canceled
+				return nil, w.ctx.Err()
 			}
 			return nil, err
 		default:
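Returning w.ctx.Err() rather than a hard-coded context.Canceled preserves the real cause of shutdown, e.g. context.DeadlineExceeded when a deadline fired. A tiny sketch of the distinction:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()
	// Propagating ctx.Err() keeps the cause; a hard-coded context.Canceled
	// would report the wrong reason here.
	fmt.Println(ctx.Err() == context.DeadlineExceeded) // true
}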