Update etcd client to 3.3.9

vendor/github.com/coreos/etcd/clientv3/BUILD (generated, vendored): 1 change

@@ -32,7 +32,6 @@ go_library(
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
         "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
         "//vendor/google.golang.org/grpc/codes:go_default_library",
         "//vendor/google.golang.org/grpc/credentials:go_default_library",

vendor/github.com/coreos/etcd/clientv3/auth.go (generated, vendored): 2 changes

@@ -15,13 +15,13 @@
 package clientv3
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
 	"github.com/coreos/etcd/auth/authpb"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )

vendor/github.com/coreos/etcd/clientv3/client.go (generated, vendored): 2 changes

@@ -15,6 +15,7 @@
 package clientv3
 
 import (
+	"context"
	"crypto/tls"
 	"errors"
 	"fmt"
@@ -27,7 +28,6 @@ import (
 
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"

vendor/github.com/coreos/etcd/clientv3/cluster.go (generated, vendored): 3 changes

@@ -15,10 +15,11 @@
 package clientv3
 
 import (
+	"context"
 
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/types"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )

vendor/github.com/coreos/etcd/clientv3/compare.go (generated, vendored): 20 changes

@@ -60,6 +60,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
 		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
 	case pb.Compare_MOD:
 		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
 	default:
 		panic("Unknown compare type")
 	}
@@ -82,6 +84,12 @@ func ModRevision(key string) Cmp {
 	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
 }
 
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
 // KeyBytes returns the byte slice holding with the comparison key.
 func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
 
@@ -99,6 +107,18 @@ func (cmp *Cmp) ValueBytes() []byte {
 // WithValueBytes sets the byte slice for the comparison's value.
 func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
 
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
+
 // mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
 func mustInt64(val interface{}) int64 {
 	if v, ok := val.(int64); ok {

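Note: the LeaseValue comparison and the WithRange/WithPrefix helpers added above can be combined in a transaction. A minimal sketch (client setup, key names, and values are illustrative, not part of this commit):

	// Put "k" only while it is not attached to any lease.
	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.LeaseValue("k"), "=", clientv3.NoLease)).
		Then(clientv3.OpPut("k", "v")).
		Commit()
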
vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD (generated, vendored): 1 change

@@ -17,7 +17,6 @@ go_library(
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
         "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
     ],
 )

vendor/github.com/coreos/etcd/clientv3/concurrency/election.go (generated, vendored): 3 changes

@@ -15,14 +15,13 @@
 package concurrency
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
-
-	"golang.org/x/net/context"
 )
 
 var (

vendor/github.com/coreos/etcd/clientv3/concurrency/key.go (generated, vendored): 3 changes

@@ -15,13 +15,12 @@
 package concurrency
 
 import (
+	"context"
 	"fmt"
 
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
-
-	"golang.org/x/net/context"
 )
 
 func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {

vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go (generated, vendored): 3 changes

@@ -15,13 +15,12 @@
 package concurrency
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"golang.org/x/net/context"
 )
 
 // Mutex implements the sync Locker interface with etcd

vendor/github.com/coreos/etcd/clientv3/concurrency/session.go (generated, vendored): 3 changes

@@ -15,11 +15,10 @@
 package concurrency
 
 import (
+	"context"
 	"time"
 
 	v3 "github.com/coreos/etcd/clientv3"
-
-	"golang.org/x/net/context"
 )
 
 const defaultSessionTTL = 60

vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go (generated, vendored): 3 changes

@@ -15,11 +15,10 @@
 package concurrency
 
 import (
+	"context"
 	"math"
 
 	v3 "github.com/coreos/etcd/clientv3"
-
-	"golang.org/x/net/context"
 )
 
 // STM is an interface for software transactional memory.

vendor/github.com/coreos/etcd/clientv3/config.go (generated, vendored): 8 changes

@@ -15,10 +15,10 @@
 package clientv3
 
 import (
+	"context"
 	"crypto/tls"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
@@ -33,12 +33,12 @@ type Config struct {
 	// DialTimeout is the timeout for failing to establish a connection.
 	DialTimeout time.Duration `json:"dial-timeout"`
 
-	// DialKeepAliveTime is the time in seconds after which client pings the server to see if
+	// DialKeepAliveTime is the time after which client pings the server to see if
 	// transport is alive.
 	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
 
-	// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
-	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	// DialKeepAliveTimeout is the time that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
 	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
 
 	// MaxCallSendMsgSize is the client-side request send limit in bytes.

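Note: both keep-alive fields are plain time.Duration values, as the reworded comments above say. A minimal sketch of a Config that sets them (endpoint and durations are illustrative):

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"},
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second, // ping the server when the transport is idle this long
		DialKeepAliveTimeout: 10 * time.Second, // close the connection if the probe gets no response in time
	})
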
vendor/github.com/coreos/etcd/clientv3/doc.go (generated, vendored): 39 changes

@@ -16,6 +16,22 @@
 //
 // Create client using `clientv3.New`:
 //
+//	// expect dial time-out on ipv4 blackhole
+//	_, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"http://254.0.0.1:12345"},
+//		DialTimeout: 2 * time.Second
+//	})
+//
+//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
+//	if err == context.DeadlineExceeded {
+//		// handle errors
+//	}
+//
+//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
+//	if err == grpc.ErrClientConnTimeout {
+//		// handle errors
+//	}
+//
 //	cli, err := clientv3.New(clientv3.Config{
 //		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
 //		DialTimeout: 5 * time.Second,
@@ -41,10 +57,11 @@
 // The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
 // Clients are safe for concurrent use by multiple goroutines.
 //
-// etcd client returns 2 types of errors:
+// etcd client returns 3 types of errors:
 //
-//  1. context error: canceled or deadline exceeded.
-//  2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
+//  1. context error: canceled or deadline exceeded.
+//  2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
+//  3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
 //
 // Here is the example code to handle client errors:
 //
@@ -54,6 +71,12 @@
 //		// ctx is canceled by another routine
 //	} else if err == context.DeadlineExceeded {
 //		// ctx is attached with a deadline and it exceeded
+//	} else if ev, ok := status.FromError(err); ok {
+//		code := ev.Code()
+//		if code == codes.DeadlineExceeded {
+//			// server-side context might have timed-out first (due to clock skew)
+//			// while original client-side context is not timed-out yet
+//		}
 //	} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
 //		// process (verr.Errors)
 //	} else {
@@ -61,4 +84,14 @@
 //	}
 // }
 //
+// go func() { cli.Close() }()
+// _, err := kvc.Get(ctx, "a")
+// if err != nil {
+//	if err == context.Canceled {
+//		// grpc balancer calls 'Get' with an inflight client.Close
+//	} else if err == grpc.ErrClientConnClosing {
+//		// grpc balancer calls 'Get' after client.Close.
+//	}
+// }
+//
 package clientv3

vendor/github.com/coreos/etcd/clientv3/health_balancer.go (generated, vendored): 38 changes

@@ -15,13 +15,13 @@
 package clientv3
 
 import (
+	"context"
 	"errors"
 	"net/url"
 	"strings"
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
@@ -158,34 +158,26 @@ func (b *healthBalancer) pinned() string {
 
 func (b *healthBalancer) hostPortError(hostPort string, err error) {
 	if b.endpoint(hostPort) == "" {
-		if logger.V(4) {
-			logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
-		}
+		logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
 		return
 	}
 
 	b.unhealthyMu.Lock()
 	b.unhealthyHostPorts[hostPort] = time.Now()
 	b.unhealthyMu.Unlock()
-	if logger.V(4) {
-		logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
-	}
+	logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
 }
 
 func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
 	if b.endpoint(hostPort) == "" {
-		if logger.V(4) {
-			logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
-		}
+		logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
 		return
 	}
 
 	b.unhealthyMu.Lock()
 	delete(b.unhealthyHostPorts, hostPort)
 	b.unhealthyMu.Unlock()
-	if logger.V(4) {
-		logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
-	}
+	logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
 }
 
 func (b *healthBalancer) countUnhealthy() (count int) {
@@ -207,9 +199,7 @@ func (b *healthBalancer) cleanupUnhealthy() {
 	for k, v := range b.unhealthyHostPorts {
 		if time.Since(v) > b.healthCheckTimeout {
 			delete(b.unhealthyHostPorts, k)
-			if logger.V(4) {
-				logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
-			}
+			logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
 		}
 	}
 	b.unhealthyMu.Unlock()
@@ -412,9 +402,7 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) {
 	}
 
 	if b.pinAddr != "" {
-		if logger.V(4) {
-			logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
-		}
+		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
 		return func(err error) {}
 	}
 
@@ -422,9 +410,7 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) {
 	close(b.upc)
 	b.downc = make(chan struct{})
 	b.pinAddr = addr.Addr
-	if logger.V(4) {
-		logger.Infof("clientv3/balancer: pin %q", addr.Addr)
-	}
+	logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
 
 	// notify client that a connection is up
 	b.readyOnce.Do(func() { close(b.readyc) })
@@ -441,9 +427,7 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) {
 		close(b.downc)
 		b.pinAddr = ""
 		b.mu.Unlock()
-		if logger.V(4) {
-			logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
-		}
+		logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
 	}
 }
@@ -470,9 +454,7 @@ func (b *healthBalancer) mayPin(addr grpc.Address) bool {
 	// 3. grpc-healthcheck still SERVING, thus retry to pin
 	// instead, return before grpc-healthcheck if failed within healthcheck timeout
 	if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
-		if logger.V(4) {
-			logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
-		}
+		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
 		return false
 	}

vendor/github.com/coreos/etcd/clientv3/kv.go (generated, vendored): 3 changes

@@ -15,9 +15,10 @@
 package clientv3
 
 import (
+	"context"
+
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )

vendor/github.com/coreos/etcd/clientv3/lease.go (generated, vendored): 49 changes

@@ -15,13 +15,13 @@
 package clientv3
 
 import (
+	"context"
 	"sync"
 	"time"
 
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 )
@@ -51,7 +51,7 @@ type LeaseTimeToLiveResponse struct {
 	*pb.ResponseHeader
 	ID LeaseID `json:"id"`
 
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
 	TTL int64 `json:"ttl"`
 
 	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
@@ -67,6 +67,12 @@ type LeaseStatus struct {
 	// TODO: TTL int64
 }
 
+// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
+type LeaseLeasesResponse struct {
+	*pb.ResponseHeader
+	Leases []LeaseStatus `json:"leases"`
+}
+
 const (
 	// defaultTTL is the assumed lease TTL used for the first keepalive
 	// deadline before the actual TTL is known to the client.
@@ -108,11 +114,32 @@ type Lease interface {
 	// TimeToLive retrieves the lease information of the given lease ID.
 	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
 
-	// KeepAlive keeps the given lease alive forever.
+	// Leases retrieves all leases.
+	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+
+	// KeepAlive keeps the given lease alive forever. If the keepalive response
+	// posted to the channel is not consumed immediately, the lease client will
+	// continue sending keep alive requests to the etcd server at least every
+	// second until latest response is consumed.
+	//
+	// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
+	// alive stream is interrupted in some way the client cannot handle itself;
+	// given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
+	// from this closed channel is nil.
+	//
+	// If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
+	// no leader") or canceled by the caller (e.g. context.Canceled), the error
+	// is returned. Otherwise, it retries.
+	//
+	// TODO(v4.0): post errors to last keep alive message before closing
+	// (see https://github.com/coreos/etcd/pull/7866)
 	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
 
-	// KeepAliveOnce renews the lease once. In most of the cases, KeepAlive
-	// should be used instead of KeepAliveOnce.
+	// KeepAliveOnce renews the lease once. The response corresponds to the
+	// first message from calling KeepAlive. If the response has a recoverable
+	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
+	//
+	// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
 	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
 
 	// Close releases all resources Lease keeps for efficient communication
@@ -221,6 +248,18 @@ func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption
 	return nil, toErr(ctx, err)
 }
 
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+	if err == nil {
+		leases := make([]LeaseStatus, len(resp.Leases))
+		for i := range resp.Leases {
+			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+		}
+		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
 func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
 	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)

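Note: a minimal sketch of the new Lease.Leases call added above (client creation and error handling are illustrative):

	resp, err := cli.Leases(context.TODO())
	if err != nil {
		// handle error
	}
	for _, s := range resp.Leases {
		fmt.Println("lease:", s.ID)
	}
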
vendor/github.com/coreos/etcd/clientv3/logger.go (generated, vendored): 85 changes

@@ -23,10 +23,23 @@ import (
 
 // Logger is the logger used by client library.
 // It implements grpclog.LoggerV2 interface.
-type Logger grpclog.LoggerV2
+type Logger interface {
+	grpclog.LoggerV2
+
+	// Lvl returns logger if logger's verbosity level >= "lvl".
+	// Otherwise, logger that discards all logs.
+	Lvl(lvl int) Logger
+
+	// to satisfy capnslog
+
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
 
 var (
-	logger settableLogger
+	loggerMu sync.RWMutex
+	logger   Logger
 )
 
 type settableLogger struct {
@@ -36,38 +49,35 @@ type settableLogger struct {
 
 func init() {
 	// disable client side logs by default
-	logger.mu.Lock()
-	logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
-
-	// logger has to override the grpclog at initialization so that
-	// any changes to the grpclog go through logger with locking
-	// instead of through SetLogger
-	//
-	// now updates only happen through settableLogger.set
-	grpclog.SetLoggerV2(&logger)
-	logger.mu.Unlock()
+	logger = &settableLogger{}
+	SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
 }
 
-// SetLogger sets client-side Logger. By default, logs are disabled.
-func SetLogger(l Logger) {
-	logger.set(l)
+// SetLogger sets client-side Logger.
+func SetLogger(l grpclog.LoggerV2) {
+	loggerMu.Lock()
+	logger = NewLogger(l)
+	// override grpclog so that any changes happen with locking
+	grpclog.SetLoggerV2(logger)
+	loggerMu.Unlock()
 }
 
 // GetLogger returns the current logger.
 func GetLogger() Logger {
-	return logger.get()
+	loggerMu.RLock()
+	l := logger
+	loggerMu.RUnlock()
+	return l
 }
 
-func (s *settableLogger) set(l Logger) {
-	s.mu.Lock()
-	logger.l = l
-	grpclog.SetLoggerV2(&logger)
-	s.mu.Unlock()
+// NewLogger returns a new Logger with grpclog.LoggerV2.
+func NewLogger(gl grpclog.LoggerV2) Logger {
+	return &settableLogger{l: gl}
 }
 
-func (s *settableLogger) get() Logger {
+func (s *settableLogger) get() grpclog.LoggerV2 {
 	s.mu.RLock()
-	l := logger.l
+	l := s.l
 	s.mu.RUnlock()
 	return l
 }
@@ -94,3 +104,32 @@ func (s *settableLogger) Print(args ...interface{}) { s.get().In
 func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
 func (s *settableLogger) Println(args ...interface{})               { s.get().Infoln(args...) }
 func (s *settableLogger) V(l int) bool                              { return s.get().V(l) }
+func (s *settableLogger) Lvl(lvl int) Logger {
+	s.mu.RLock()
+	l := s.l
+	s.mu.RUnlock()
+	if l.V(lvl) {
+		return s
+	}
+	return &noLogger{}
+}
+
+type noLogger struct{}
+
+func (*noLogger) Info(args ...interface{})                    {}
+func (*noLogger) Infof(format string, args ...interface{})    {}
+func (*noLogger) Infoln(args ...interface{})                  {}
+func (*noLogger) Warning(args ...interface{})                 {}
+func (*noLogger) Warningf(format string, args ...interface{}) {}
+func (*noLogger) Warningln(args ...interface{})               {}
+func (*noLogger) Error(args ...interface{})                   {}
+func (*noLogger) Errorf(format string, args ...interface{})   {}
+func (*noLogger) Errorln(args ...interface{})                 {}
+func (*noLogger) Fatal(args ...interface{})                   {}
+func (*noLogger) Fatalf(format string, args ...interface{})   {}
+func (*noLogger) Fatalln(args ...interface{})                 {}
+func (*noLogger) Print(args ...interface{})                   {}
+func (*noLogger) Printf(format string, args ...interface{})   {}
+func (*noLogger) Println(args ...interface{})                 {}
+func (*noLogger) V(l int) bool                                { return false }
+func (ng *noLogger) Lvl(lvl int) Logger                       { return ng }

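Note: with this rework, SetLogger takes a grpclog.LoggerV2 and wraps it into the package's Logger interface, so the Lvl(4)-style call sites used throughout this commit work against any gRPC logger. A minimal sketch (the output writers are illustrative; imports "os", "github.com/coreos/etcd/clientv3", and "google.golang.org/grpc/grpclog" assumed):

	func enableClientLogs() {
		// Route client logs to stderr; by default the client discards them.
		clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
	}
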
vendor/github.com/coreos/etcd/clientv3/maintenance.go (generated, vendored): 43 changes

@@ -15,11 +15,11 @@
 package clientv3
 
 import (
+	"context"
 	"io"
 
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
@@ -28,6 +28,8 @@ type (
 	AlarmResponse      pb.AlarmResponse
 	AlarmMember        pb.AlarmMember
 	StatusResponse     pb.StatusResponse
+	HashKVResponse     pb.HashKVResponse
+	MoveLeaderResponse pb.MoveLeaderResponse
 )
 
 type Maintenance interface {
@@ -49,8 +51,17 @@ type Maintenance interface {
 	// Status gets the status of the endpoint.
 	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
 
+	// HashKV returns a hash of the KV state at the time of the RPC.
+	// If revision is zero, the hash is computed on all keys. If the revision
+	// is non-zero, the hash is computed on all keys at or below the given revision.
+	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
 	// Snapshot provides a reader for a point-in-time snapshot of etcd.
 	Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+	// MoveLeader requests current leader to transfer its leadership to the transferee.
+	// Request must be made to the leader.
+	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
 }
 
 type maintenance struct {
@@ -159,6 +170,19 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
 	return (*StatusResponse)(resp), nil
 }
 
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*HashKVResponse)(resp), nil
+}
+
 func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
 	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
 	if err != nil {
@@ -183,5 +207,20 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
 		}
 		pw.Close()
 	}()
-	return pr, nil
+	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
 }
+
+type snapshotReadCloser struct {
+	ctx context.Context
+	io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+	n, err = rc.ReadCloser.Read(p)
+	return n, toErr(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+}

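Note: a minimal sketch of the two maintenance RPCs added above (endpoint and member ID are illustrative):

	// Hash of the KV state on one endpoint; revision 0 means all keys.
	hresp, err := cli.HashKV(context.TODO(), "localhost:2379", 0)

	// Ask the current leader to hand off leadership; the request must go to the leader.
	transfereeID := uint64(0x1234) // illustrative member ID
	mresp, err := cli.MoveLeader(context.TODO(), transfereeID)
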
vendor/github.com/coreos/etcd/clientv3/namespace/BUILD (generated, vendored): 1 change

@@ -16,7 +16,6 @@ go_library(
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
     ],
 )

vendor/github.com/coreos/etcd/clientv3/namespace/kv.go (generated, vendored): 69 changes

@@ -15,11 +15,11 @@
 package namespace
 
 import (
+	"context"
+
 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"golang.org/x/net/context"
 )
 
 type kvPrefix struct {
@@ -74,7 +74,7 @@ func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpO
 }
 
 func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
-	if len(op.KeyBytes()) == 0 {
+	if len(op.KeyBytes()) == 0 && !op.IsTxn() {
 		return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
 	}
 	r, err := kv.KV.Do(ctx, kv.prefixOp(op))
@@ -88,6 +88,8 @@ func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse
 		kv.unprefixPutResponse(r.Put())
 	case r.Del() != nil:
 		kv.unprefixDeleteResponse(r.Del())
+	case r.Txn() != nil:
+		kv.unprefixTxnResponse(r.Txn())
 	}
 	return r, nil
 }
@@ -102,31 +104,17 @@ func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
 }
 
 func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
-	newCmps := make([]clientv3.Cmp, len(cs))
-	for i := range cs {
-		newCmps[i] = cs[i]
-		pfxKey, _ := txn.kv.prefixInterval(cs[i].KeyBytes(), nil)
-		newCmps[i].WithKeyBytes(pfxKey)
-	}
-	txn.Txn = txn.Txn.If(newCmps...)
+	txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
 	return txn
 }
 
 func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
-	newOps := make([]clientv3.Op, len(ops))
-	for i := range ops {
-		newOps[i] = txn.kv.prefixOp(ops[i])
-	}
-	txn.Txn = txn.Txn.Then(newOps...)
+	txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
 	return txn
 }
 
 func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
-	newOps := make([]clientv3.Op, len(ops))
-	for i := range ops {
-		newOps[i] = txn.kv.prefixOp(ops[i])
-	}
-	txn.Txn = txn.Txn.Else(newOps...)
+	txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
 	return txn
 }
 
@@ -140,10 +128,14 @@ func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
 }
 
 func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
-	begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
-	op.WithKeyBytes(begin)
-	op.WithRangeBytes(end)
-	return op
+	if !op.IsTxn() {
+		begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
+		op.WithKeyBytes(begin)
+		op.WithRangeBytes(end)
+		return op
+	}
+	cmps, thenOps, elseOps := op.Txn()
+	return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
 }
 
 func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
@@ -179,11 +171,36 @@ func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
 			if tv.ResponseDeleteRange != nil {
 				kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
 			}
+		case *pb.ResponseOp_ResponseTxn:
+			if tv.ResponseTxn != nil {
+				kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
+			}
 		default:
 		}
 	}
 }
 
-func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
-	return prefixInterval(p.pfx, key, end)
+func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
+	return prefixInterval(kv.pfx, key, end)
 }
+
+func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
+	newCmps := make([]clientv3.Cmp, len(cs))
+	for i := range cs {
+		newCmps[i] = cs[i]
+		pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd)
+		newCmps[i].WithKeyBytes(pfxKey)
+		if len(cs[i].RangeEnd) != 0 {
+			newCmps[i].RangeEnd = endKey
+		}
+	}
+	return newCmps
+}
+
+func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
+	newOps := make([]clientv3.Op, len(ops))
+	for i := range ops {
+		newOps[i] = kv.prefixOp(ops[i])
+	}
+	return newOps
+}

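Note: with the changes above, the namespace wrapper now prefixes transactions as well as plain ops. A minimal sketch (prefix and key names are illustrative):

	kv := namespace.NewKV(cli.KV, "my-prefix/")
	// Comparisons and ops inside the txn are transparently rewritten to "my-prefix/k".
	_, err := kv.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
		Then(clientv3.OpPut("k", "v")).
		Commit()
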
vendor/github.com/coreos/etcd/clientv3/namespace/lease.go (generated, vendored): 3 changes

@@ -16,10 +16,9 @@ package namespace
 
 import (
 	"bytes"
+	"context"
 
 	"github.com/coreos/etcd/clientv3"
-
-	"golang.org/x/net/context"
 )
 
 type leasePrefix struct {

vendor/github.com/coreos/etcd/clientv3/namespace/watch.go (generated, vendored): 3 changes

@@ -15,11 +15,10 @@
 package namespace
 
 import (
+	"context"
 	"sync"
 
 	"github.com/coreos/etcd/clientv3"
-
-	"golang.org/x/net/context"
 )
 
 type watcherPrefix struct {

vendor/github.com/coreos/etcd/clientv3/naming/BUILD (generated, vendored): 1 change

@@ -11,7 +11,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
-        "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/grpc/codes:go_default_library",
         "//vendor/google.golang.org/grpc/naming:go_default_library",
        "//vendor/google.golang.org/grpc/status:go_default_library",

vendor/github.com/coreos/etcd/clientv3/naming/grpc.go (generated, vendored): 3 changes

@@ -15,6 +15,7 @@
 package naming
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 
@@ -23,8 +24,6 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/naming"
 	"google.golang.org/grpc/status"
-
-	"golang.org/x/net/context"
 )
 
 var ErrWatcherClosed = fmt.Errorf("naming: watch closed")

vendor/github.com/coreos/etcd/clientv3/op.go (generated, vendored): 2 changes

@@ -181,6 +181,8 @@ func (op Op) toRequestOp() *pb.RequestOp {
 	case tDeleteRange:
 		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
 		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+	case tTxn:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
 	default:
 		panic("Unknown Op")
 	}

vendor/github.com/coreos/etcd/clientv3/ready_wait.go (generated, vendored): 2 changes

@@ -14,7 +14,7 @@
 
 package clientv3
 
-import "golang.org/x/net/context"
+import "context"
 
 // TODO: remove this when "FailFast=false" is fixed.
 // See https://github.com/grpc/grpc-go/issues/1532.

357
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
357
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
@@ -15,19 +15,36 @@
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type retryPolicy uint8
|
||||
|
||||
const (
|
||||
repeatable retryPolicy = iota
|
||||
nonRepeatable
|
||||
)
|
||||
|
||||
type rpcFunc func(ctx context.Context) error
|
||||
type retryRPCFunc func(context.Context, rpcFunc) error
|
||||
type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
|
||||
type retryStopErrFunc func(error) bool
|
||||
|
||||
// immutable requests (e.g. Get) should be retried unless it's
|
||||
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
|
||||
//
|
||||
// "isRepeatableStopError" returns "true" when an immutable request
|
||||
// is interrupted by server-side or gRPC-side error and its status
|
||||
// code is not transient (!= codes.Unavailable).
|
||||
//
|
||||
// Returning "true" means retry should stop, since client cannot
|
||||
// handle itself even with retries.
|
||||
func isRepeatableStopError(err error) bool {
|
||||
eErr := rpctypes.Error(err)
|
||||
// always stop retry on etcd errors
|
||||
@@ -39,6 +56,17 @@ func isRepeatableStopError(err error) bool {
|
||||
return ev.Code() != codes.Unavailable
|
||||
}
|
||||
|
||||
// mutable requests (e.g. Put, Delete, Txn) should only be retried
|
||||
// when the status code is codes.Unavailable when initial connection
|
||||
// has not been established (no pinned endpoint).
|
||||
//
|
||||
// "isNonRepeatableStopError" returns "true" when a mutable request
|
||||
// is interrupted by non-transient error that client cannot handle itself,
|
||||
// or transient error while the connection has already been established
|
||||
// (pinned endpoint exists).
|
||||
//
|
||||
// Returning "true" means retry should stop, otherwise it violates
|
||||
// write-at-most-once semantics.
|
||||
func isNonRepeatableStopError(err error) bool {
|
||||
ev, _ := status.FromError(err)
|
||||
if ev.Code() != codes.Unavailable {
|
||||
@@ -48,8 +76,15 @@ func isNonRepeatableStopError(err error) bool {
|
||||
return desc != "there is no address available" && desc != "there is no connection available"
|
||||
}
|
||||
|
||||
func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
func (c *Client) newRetryWrapper() retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
|
||||
var isStop retryStopErrFunc
|
||||
switch rp {
|
||||
case repeatable:
|
||||
isStop = isRepeatableStopError
|
||||
case nonRepeatable:
|
||||
isStop = isNonRepeatableStopError
|
||||
}
|
||||
for {
|
||||
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
|
||||
return err
|
||||
@@ -59,17 +94,13 @@ func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
}
|
||||
logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
|
||||
if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
|
||||
// mark this before endpoint switch is triggered
|
||||
c.balancer.hostPortError(pinned, err)
|
||||
c.balancer.next()
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
|
||||
}
|
||||
logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
|
||||
}
|
||||
|
||||
if isStop(err) {
|
||||
@@ -79,24 +110,20 @@ func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) newAuthRetryWrapper() retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
|
||||
for {
|
||||
pinned := c.balancer.pinned()
|
||||
err := f(rpcCtx)
|
||||
err := retryf(rpcCtx, f, rp)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
}
|
||||
logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
// always stop retry on etcd errors other than invalid auth token
|
||||
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := c.getToken(rpcCtx)
|
||||
if gterr != nil {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
|
||||
}
|
||||
logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
|
||||
return err // return the original error for simplicity
|
||||
}
|
||||
continue
|
||||
@@ -106,366 +133,364 @@ func (c *Client) newAuthRetryWrapper() retryRPCFunc {
|
||||
}
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
kc pb.KVClient
|
||||
retryf retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryKVClient implements a KVClient.
|
||||
func RetryKVClient(c *Client) pb.KVClient {
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
conn := pb.NewKVClient(c.conn)
|
||||
retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry}
|
||||
retryAuthWrapper := c.newAuthRetryWrapper()
|
||||
return &retryKVClient{
|
||||
&nonRepeatableKVClient{retryBasic, retryAuthWrapper},
|
||||
retryAuthWrapper}
|
||||
kc: pb.NewKVClient(c.conn),
|
||||
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
|
||||
}
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
*nonRepeatableKVClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||
err = rkv.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Range(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type nonRepeatableKVClient struct {
|
||||
kc pb.KVClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Put(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
// TODO: repeatableRetry if read-only txn
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
// TODO: "repeatable" for read-only txn
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Txn(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Compact(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryLeaseClient struct {
|
||||
lc pb.LeaseClient
|
||||
repeatableRetry retryRPCFunc
|
||||
lc pb.LeaseClient
|
||||
retryf retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryLeaseClient implements a LeaseClient.
|
||||
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||
retry := &retryLeaseClient{
|
||||
pb.NewLeaseClient(c.conn),
|
||||
c.newRetryWrapper(isRepeatableStopError),
|
||||
return &retryLeaseClient{
|
||||
lc: pb.NewLeaseClient(c.conn),
|
||||
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
|
||||
}
|
||||
return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
|
||||
return err
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return stream, err
|
||||
}
|
||||
|
||||
type retryClusterClient struct {
|
||||
*nonRepeatableClusterClient
|
||||
repeatableRetry retryRPCFunc
|
||||
cc pb.ClusterClient
|
||||
retryf retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryClusterClient implements a ClusterClient.
|
||||
func RetryClusterClient(c *Client) pb.ClusterClient {
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
cc := pb.NewClusterClient(c.conn)
|
||||
return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry}
|
||||
return &retryClusterClient{
|
||||
cc: pb.NewClusterClient(c.conn),
|
||||
retryf: c.newRetryWrapper(),
|
||||
}
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
|
||||
err = rcc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberList(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type nonRepeatableClusterClient struct {
|
||||
cc pb.ClusterClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryMaintenanceClient struct {
|
||||
mc pb.MaintenanceClient
|
||||
retryf retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryMaintenanceClient implements a Maintenance.
|
||||
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
mc := pb.NewMaintenanceClient(conn)
|
||||
return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
|
||||
}
|
||||
|
||||
type retryMaintenanceClient struct {
|
||||
*nonRepeatableMaintenanceClient
|
||||
repeatableRetry retryRPCFunc
|
||||
return &retryMaintenanceClient{
|
||||
mc: pb.NewMaintenanceClient(conn),
|
||||
retryf: c.newRetryWrapper(),
|
||||
}
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Alarm(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Status(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Hash(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.HashKV(rctx, in, opts...)
|
||||
return err
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, repeatable)
|
||||
return stream, err
|
||||
}
|
||||
|
||||
type nonRepeatableMaintenanceClient struct {
|
||||
mc pb.MaintenanceClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
|
||||
return err
|
||||
}, repeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
|
||||
err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
|
||||
err = rmc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Defragment(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
}, nonRepeatable)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryAuthClient struct {
|
||||
*nonRepeatableAuthClient
|
||||
repeatableRetry retryRPCFunc
|
||||
ac pb.AuthClient
|
||||
retryf retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryAuthClient implements a AuthClient.
|
||||
func RetryAuthClient(c *Client) pb.AuthClient {
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
ac := pb.NewAuthClient(c.conn)
|
||||
return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
|
||||
return &retryAuthClient{
|
||||
ac: pb.NewAuthClient(c.conn),
|
||||
retryf: c.newRetryWrapper(),
|
||||
}
|
||||
}
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserList(rctx, in, opts...)
		return err
	})
	}, repeatable)
	return resp, err
}

func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserGet(rctx, in, opts...)
		return err
	})
	}, repeatable)
	return resp, err
}

func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleGet(rctx, in, opts...)
		return err
	})
	}, repeatable)
	return resp, err
}

func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleList(rctx, in, opts...)
		return err
	})
	}, repeatable)
	return resp, err
}

type nonRepeatableAuthClient struct {
	ac                 pb.AuthClient
	nonRepeatableRetry retryRPCFunc
}

func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.AuthEnable(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.AuthDisable(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserAdd(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserDelete(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleAdd(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleDelete(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.ac.Authenticate(rctx, in, opts...)
		return err
	})
	}, nonRepeatable)
	return resp, err
}

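Across the auth wrappers the split is consistent: the four read-only RPCs (UserList, UserGet, RoleGet, RoleList) retry as repeatable, and every RPC that mutates auth state or issues a token is nonRepeatable, since retrying after an ambiguous failure could apply the mutation twice. The map below restates that classification for reference; it is illustrative documentation, not part of the client API.

package main

import "fmt"

// Illustrative classification of the auth RPCs in this diff.
var authRetryPolicy = map[string]string{
	"UserList": "repeatable",
	"UserGet":  "repeatable",
	"RoleGet":  "repeatable",
	"RoleList": "repeatable",

	"AuthEnable":           "nonRepeatable",
	"AuthDisable":          "nonRepeatable",
	"UserAdd":              "nonRepeatable",
	"UserDelete":           "nonRepeatable",
	"UserChangePassword":   "nonRepeatable",
	"UserGrantRole":        "nonRepeatable",
	"UserRevokeRole":       "nonRepeatable",
	"RoleAdd":              "nonRepeatable",
	"RoleDelete":           "nonRepeatable",
	"RoleGrantPermission":  "nonRepeatable",
	"RoleRevokePermission": "nonRepeatable",
	"Authenticate":         "nonRepeatable",
}

func main() {
	fmt.Println(authRetryPolicy["UserAdd"]) // nonRepeatable
}
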
2
vendor/github.com/coreos/etcd/clientv3/txn.go
generated
vendored
@@ -15,11 +15,11 @@
package clientv3

import (
	"context"
	"sync"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

4
vendor/github.com/coreos/etcd/clientv3/watch.go
generated
vendored
@@ -15,6 +15,7 @@
package clientv3

import (
	"context"
	"fmt"
	"sync"
	"time"
@@ -23,7 +24,6 @@ import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
@@ -470,7 +470,7 @@ func (w *watchGrpcStream) run() {
			if ws := w.nextResume(); ws != nil {
				wc.Send(ws.initReq.toPB())
			}
		case pbresp.Canceled:
		case pbresp.Canceled && pbresp.CompactRevision == 0:
			delete(cancelSet, pbresp.WatchId)
			if ws, ok := w.substreams[pbresp.WatchId]; ok {
				// signal to stream goroutine to update closingc
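The run() hunk above narrows the plain-cancel path to responses with CompactRevision == 0, so a cancel caused by compaction can be reported to the watcher instead of being swallowed. A typical consumer reaction is to resume from the compact revision; in the hedged sketch below, watchFrom is hypothetical, while WatchResponse.CompactRevision and clientv3.WithRev are existing clientv3 API.

package example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// watchFrom watches key starting at rev, restarting from the compact
// revision whenever the server reports that rev has been compacted away.
func watchFrom(ctx context.Context, cli *clientv3.Client, key string, rev int64) {
	for ctx.Err() == nil {
		wch := cli.Watch(ctx, key, clientv3.WithRev(rev))
		for wr := range wch {
			if wr.CompactRevision != 0 {
				// rev is older than the last compaction; resume from the
				// oldest revision the server still has.
				rev = wr.CompactRevision
				break
			}
			for _, ev := range wr.Events {
				rev = ev.Kv.ModRevision + 1
				_ = ev // process the event
			}
		}
		// Channel closed without compaction: loop exits once ctx is done;
		// a real implementation would also detect a closed client here.
	}
}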