Version bump to etcd v3.2.11

Joe Betz
2017-12-12 16:20:42 -08:00
parent 4956e65d59
commit 05afd248f2
287 changed files with 25980 additions and 5220 deletions


@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"jwt.go",
"range_perm_cache.go",
"simple_token.go",
"store.go",
@@ -14,10 +15,14 @@ go_library(
"//vendor/github.com/coreos/etcd/auth/authpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/adt:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/dgrijalva/jwt-go:go_default_library",
"//vendor/golang.org/x/crypto/bcrypt:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc/credentials:go_default_library",
"//vendor/google.golang.org/grpc/metadata:go_default_library",
"//vendor/google.golang.org/grpc/peer:go_default_library",
],
)


@@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,

vendor/github.com/coreos/etcd/auth/jwt.go (generated, vendored, new normal file, +137 lines)

@@ -0,0 +1,137 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"crypto/rsa"
"io/ioutil"
jwt "github.com/dgrijalva/jwt-go"
"golang.org/x/net/context"
)
type tokenJWT struct {
signMethod string
signKey *rsa.PrivateKey
verifyKey *rsa.PublicKey
}
func (t *tokenJWT) enable() {}
func (t *tokenJWT) disable() {}
func (t *tokenJWT) invalidateUser(string) {}
func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
// rev isn't used in JWT; it is only used in simple tokens
var (
username string
revision uint64
)
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
return t.verifyKey, nil
})
switch err.(type) {
case nil:
if !parsed.Valid {
plog.Warningf("invalid jwt token: %s", token)
return nil, false
}
claims := parsed.Claims.(jwt.MapClaims)
username = claims["username"].(string)
revision = uint64(claims["revision"].(float64))
default:
plog.Warningf("failed to parse jwt token: %s", err)
return nil, false
}
return &AuthInfo{Username: username, Revision: revision}, true
}
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
// Future work: letting a jwt token include permission information would be useful for
// permission checking on the proxy side.
tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
jwt.MapClaims{
"username": username,
"revision": revision,
})
token, err := tk.SignedString(t.signKey)
if err != nil {
plog.Debugf("failed to sign jwt token: %s", err)
return "", err
}
plog.Debugf("jwt token: %s", token)
return token, err
}
func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
for k, v := range opts {
switch k {
case "sign-method":
jwtSignMethod = v
case "pub-key":
jwtPubKeyPath = v
case "priv-key":
jwtPrivKeyPath = v
default:
plog.Errorf("unknown token specific option: %s", k)
return "", "", "", ErrInvalidAuthOpts
}
}
return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
}
func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
if err != nil {
return nil, ErrInvalidAuthOpts
}
t := &tokenJWT{}
t.signMethod = jwtSignMethod
verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
if err != nil {
plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
return nil, err
}
t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
if err != nil {
plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
return nil, err
}
signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
if err != nil {
plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
return nil, err
}
t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
if err != nil {
plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
return nil, err
}
return t, nil
}
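
For illustration of the claim handling above: a minimal, self-contained sketch of the sign/parse round trip using the vendored dgrijalva/jwt-go package. It uses a hypothetical HS256 shared key for brevity; the provider itself loads RSA keys from the pub-key/priv-key files and signs with the configured sign-method.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("demo-secret") // hypothetical shared key; tokenJWT uses RSA keys instead
	tk := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"),
		jwt.MapClaims{"username": "alice", "revision": uint64(7)})
	signed, err := tk.SignedString(key)
	if err != nil {
		panic(err)
	}
	parsed, err := jwt.Parse(signed, func(*jwt.Token) (interface{}, error) { return key, nil })
	if err != nil || !parsed.Valid {
		panic("invalid token")
	}
	claims := parsed.Claims.(jwt.MapClaims)
	// JSON round-tripping turns the numeric claim into a float64, which is
	// why info() above converts with uint64(claims["revision"].(float64)).
	fmt.Println(claims["username"].(string), uint64(claims["revision"].(float64)))
}
```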


@@ -15,93 +15,11 @@
package auth
import (
"bytes"
"sort"
"github.com/coreos/etcd/auth/authpb"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/adt"
)
// isSubset returns true if a is a subset of b.
// If a is a prefix of b, then a is a subset of b.
// Given intervals [a1,a2) and [b1,b2), is
// interval a a subset of interval b?
func isSubset(a, b *rangePerm) bool {
switch {
case len(a.end) == 0 && len(b.end) == 0:
// a, b are both keys
return bytes.Equal(a.begin, b.begin)
case len(b.end) == 0:
// b is a key, a is a range
return false
case len(a.end) == 0:
// a is a key, b is a range. need b1 <= a1 and a1 < b2
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0
default:
// both are ranges. need b1 <= a1 and a2 <= b2
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0
}
}
func isRangeEqual(a, b *rangePerm) bool {
return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end)
}
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
// It returns a sorted rangePerm slice.
func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
sort.Sort(RangePermSliceByBegin(perms))
var prev *rangePerm
for i := range perms {
if i == 0 {
prev = perms[i]
newp = append(newp, perms[i])
continue
}
if isRangeEqual(perms[i], prev) {
continue
}
if isSubset(perms[i], prev) {
continue
}
if isSubset(prev, perms[i]) {
prev = perms[i]
newp[len(newp)-1] = perms[i]
continue
}
prev = perms[i]
newp = append(newp, perms[i])
}
return newp
}
// mergeRangePerms merges adjacent rangePerms.
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
var merged []*rangePerm
perms = removeSubsetRangePerms(perms)
i := 0
for i < len(perms) {
begin, next := i, i
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 {
next++
}
// don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty.
if next != begin && len(perms[next].end) > 0 {
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
} else {
merged = append(merged, perms[begin])
if next != begin {
merged = append(merged, perms[next])
}
}
i = next + 1
}
return merged
}
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
user := getUser(tx, userName)
if user == nil {
@@ -109,7 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission
return nil
}
var readPerms, writePerms []*rangePerm
readPerms := &adt.IntervalTree{}
writePerms := &adt.IntervalTree{}
for _, roleName := range user.Roles {
role := getRole(tx, roleName)
@@ -118,48 +37,66 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission
}
for _, perm := range role.KeyPermission {
rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd}
var ivl adt.Interval
var rangeEnd []byte
if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
rangeEnd = perm.RangeEnd
}
if len(perm.RangeEnd) != 0 {
ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
} else {
ivl = adt.NewBytesAffinePoint(perm.Key)
}
switch perm.PermType {
case authpb.READWRITE:
readPerms = append(readPerms, rp)
writePerms = append(writePerms, rp)
readPerms.Insert(ivl, struct{}{})
writePerms.Insert(ivl, struct{}{})
case authpb.READ:
readPerms = append(readPerms, rp)
readPerms.Insert(ivl, struct{}{})
case authpb.WRITE:
writePerms = append(writePerms, rp)
writePerms.Insert(ivl, struct{}{})
}
}
}
return &unifiedRangePermissions{
readPerms: mergeRangePerms(readPerms),
writePerms: mergeRangePerms(writePerms),
readPerms: readPerms,
writePerms: writePerms,
}
}
func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
var tocheck []*rangePerm
func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
rangeEnd = nil
}
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
switch permtyp {
case authpb.READ:
tocheck = cachedPerms.readPerms
return cachedPerms.readPerms.Contains(ivl)
case authpb.WRITE:
tocheck = cachedPerms.writePerms
return cachedPerms.writePerms.Contains(ivl)
default:
plog.Panicf("unknown auth type: %v", permtyp)
}
return false
}
requiredPerm := &rangePerm{begin: key, end: rangeEnd}
for _, perm := range tocheck {
if isSubset(requiredPerm, perm) {
return true
}
func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
pt := adt.NewBytesAffinePoint(key)
switch permtyp {
case authpb.READ:
return cachedPerms.readPerms.Intersects(pt)
case authpb.WRITE:
return cachedPerms.writePerms.Intersects(pt)
default:
plog.Panicf("unknown auth type: %v", permtyp)
}
return false
}
@@ -175,7 +112,11 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key
as.rangePermCache[userName] = perms
}
return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp)
if len(rangeEnd) == 0 {
return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
}
return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
}
func (as *authStore) clearCachedPerm() {
@@ -187,35 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) {
}
type unifiedRangePermissions struct {
// readPerms[i] and readPerms[j] (i != j) don't overlap
readPerms []*rangePerm
// writePerms[i] and writePerms[j] (i != j) don't overlap, too
writePerms []*rangePerm
}
type rangePerm struct {
begin, end []byte
}
type RangePermSliceByBegin []*rangePerm
func (slice RangePermSliceByBegin) Len() int {
return len(slice)
}
func (slice RangePermSliceByBegin) Less(i, j int) bool {
switch bytes.Compare(slice[i].begin, slice[j].begin) {
case 0: // begin(i) == begin(j)
return bytes.Compare(slice[i].end, slice[j].end) == -1
case -1: // begin(i) < begin(j)
return true
default:
return false
}
}
func (slice RangePermSliceByBegin) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
readPerms *adt.IntervalTree
writePerms *adt.IntervalTree
}
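
The new code above replaces the sorted rangePerm slices with the interval tree from pkg/adt. A minimal standalone sketch of the three calls it relies on (Insert; Contains for key ranges; Intersects for single keys), using the adt API exactly as vendored in this commit:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt"
)

func main() {
	perms := &adt.IntervalTree{}
	// Grant a permission over the half-open key range ["a", "c").
	perms.Insert(adt.NewBytesAffineInterval([]byte("a"), []byte("c")), struct{}{})

	// A range request is permitted when the tree covers it entirely.
	fmt.Println(perms.Contains(adt.NewBytesAffineInterval([]byte("a"), []byte("b")))) // true
	fmt.Println(perms.Contains(adt.NewBytesAffineInterval([]byte("b"), []byte("d")))) // false

	// A single-key request is permitted when it intersects a granted interval.
	fmt.Println(perms.Intersects(adt.NewBytesAffinePoint([]byte("b")))) // true
}
```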


@@ -19,10 +19,14 @@ package auth
import (
"crypto/rand"
"fmt"
"math/big"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
const (
@@ -90,24 +94,14 @@ func (tm *simpleTokenTTLKeeper) run() {
}
}
func (as *authStore) enable() {
delf := func(tk string) {
if username, ok := as.simpleTokens[tk]; ok {
plog.Infof("deleting token %s for user %s", tk, username)
delete(as.simpleTokens, tk)
}
}
as.simpleTokenKeeper = &simpleTokenTTLKeeper{
tokens: make(map[string]time.Time),
donec: make(chan struct{}),
stopc: make(chan struct{}),
deleteTokenFunc: delf,
mu: &as.simpleTokensMu,
}
go as.simpleTokenKeeper.run()
type tokenSimple struct {
indexWaiter func(uint64) <-chan struct{}
simpleTokenKeeper *simpleTokenTTLKeeper
simpleTokensMu sync.Mutex
simpleTokens map[string]string // token -> username
}
func (as *authStore) GenSimpleToken() (string, error) {
func (t *tokenSimple) genTokenPrefix() (string, error) {
ret := make([]byte, defaultSimpleTokenLength)
for i := 0; i < defaultSimpleTokenLength; i++ {
@@ -122,28 +116,105 @@ func (as *authStore) GenSimpleToken() (string, error) {
return string(ret), nil
}
func (as *authStore) assignSimpleTokenToUser(username, token string) {
as.simpleTokensMu.Lock()
_, ok := as.simpleTokens[token]
func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
t.simpleTokensMu.Lock()
_, ok := t.simpleTokens[token]
if ok {
plog.Panicf("token %s is alredy used", token)
}
as.simpleTokens[token] = username
as.simpleTokenKeeper.addSimpleToken(token)
as.simpleTokensMu.Unlock()
t.simpleTokens[token] = username
t.simpleTokenKeeper.addSimpleToken(token)
t.simpleTokensMu.Unlock()
}
func (as *authStore) invalidateUser(username string) {
if as.simpleTokenKeeper == nil {
func (t *tokenSimple) invalidateUser(username string) {
if t.simpleTokenKeeper == nil {
return
}
as.simpleTokensMu.Lock()
for token, name := range as.simpleTokens {
t.simpleTokensMu.Lock()
for token, name := range t.simpleTokens {
if strings.Compare(name, username) == 0 {
delete(as.simpleTokens, token)
as.simpleTokenKeeper.deleteSimpleToken(token)
delete(t.simpleTokens, token)
t.simpleTokenKeeper.deleteSimpleToken(token)
}
}
as.simpleTokensMu.Unlock()
t.simpleTokensMu.Unlock()
}
func (t *tokenSimple) enable() {
delf := func(tk string) {
if username, ok := t.simpleTokens[tk]; ok {
plog.Infof("deleting token %s for user %s", tk, username)
delete(t.simpleTokens, tk)
}
}
t.simpleTokenKeeper = &simpleTokenTTLKeeper{
tokens: make(map[string]time.Time),
donec: make(chan struct{}),
stopc: make(chan struct{}),
deleteTokenFunc: delf,
mu: &t.simpleTokensMu,
}
go t.simpleTokenKeeper.run()
}
func (t *tokenSimple) disable() {
t.simpleTokensMu.Lock()
tk := t.simpleTokenKeeper
t.simpleTokenKeeper = nil
t.simpleTokens = make(map[string]string) // invalidate all tokens
t.simpleTokensMu.Unlock()
if tk != nil {
tk.stop()
}
}
func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
if !t.isValidSimpleToken(ctx, token) {
return nil, false
}
t.simpleTokensMu.Lock()
username, ok := t.simpleTokens[token]
if ok && t.simpleTokenKeeper != nil {
t.simpleTokenKeeper.resetSimpleToken(token)
}
t.simpleTokensMu.Unlock()
return &AuthInfo{Username: username, Revision: revision}, ok
}
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
// rev isn't used in simple tokens; it is only used in JWT
index := ctx.Value("index").(uint64)
simpleToken := ctx.Value("simpleToken").(string)
token := fmt.Sprintf("%s.%d", simpleToken, index)
t.assignSimpleTokenToUser(username, token)
return token, nil
}
func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
splitted := strings.Split(token, ".")
if len(splitted) != 2 {
return false
}
index, err := strconv.Atoi(splitted[1])
if err != nil {
return false
}
select {
case <-t.indexWaiter(uint64(index)):
return true
case <-ctx.Done():
}
return false
}
func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
return &tokenSimple{
simpleTokens: make(map[string]string),
indexWaiter: indexWaiter,
}
}
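
For context on the plumbing above: a simple token is the random prefix from genTokenPrefix joined to a Raft index as "<prefix>.<index>", and isValidSimpleToken accepts it only once indexWaiter reports that index as applied. A hedged sketch with a stub waiter that treats every index as already applied:

```go
// Stub indexWaiter for illustration only: report every raft index as applied.
waiter := func(index uint64) <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}
t := newTokenProviderSimple(waiter)
t.enable() // starts the TTL keeper that expires idle tokens
defer t.disable()
// Tokens then look like "abcdefghijklmnop.42" (hypothetical prefix and index).
```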


@@ -18,11 +18,10 @@ import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -30,7 +29,9 @@ import (
"github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)
var (
@@ -60,6 +61,8 @@ var (
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
ErrAuthOldRevision = errors.New("auth: revision in header is old")
ErrInvalidAuthToken = errors.New("auth: invalid auth token")
ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
// BcryptCost is the algorithm cost / strength for hashing auth passwords
BcryptCost = bcrypt.DefaultCost
@@ -129,10 +132,6 @@ type AuthStore interface {
// RoleList gets a list of all roles
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
// AuthInfoFromToken gets a username from the given Token and current revision number
// (The revision number is used for preventing the TOCTOU problem)
AuthInfoFromToken(token string) (*AuthInfo, bool)
// IsPutPermitted checks put permission of the user
IsPutPermitted(authInfo *AuthInfo, key []byte) error
@@ -145,8 +144,9 @@ type AuthStore interface {
// IsAdminPermitted checks admin permission of the user
IsAdminPermitted(authInfo *AuthInfo) error
// GenSimpleToken produces a simple random string
GenSimpleToken() (string, error)
// GenTokenPrefix produces a random string in the case of the simple token;
// in the case of JWT, it produces an empty string
GenTokenPrefix() (string, error)
// Revision gets current revision of authStore
Revision() uint64
@@ -159,33 +159,32 @@ type AuthStore interface {
// AuthInfoFromCtx gets AuthInfo from gRPC's context
AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
// AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
AuthInfoFromTLS(ctx context.Context) *AuthInfo
}
type TokenProvider interface {
info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
assign(ctx context.Context, username string, revision uint64) (string, error)
enable()
disable()
invalidateUser(string)
genTokenPrefix() (string, error)
}
type authStore struct {
// atomic operations; need 64-bit align, or 32-bit tests will crash
revision uint64
be backend.Backend
enabled bool
enabledMu sync.RWMutex
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
revision uint64
// tokenSimple in v3.2+
indexWaiter func(uint64) <-chan struct{}
simpleTokenKeeper *simpleTokenTTLKeeper
simpleTokensMu sync.Mutex
simpleTokens map[string]string // token -> username
}
func newDeleterFunc(as *authStore) func(string) {
return func(t string) {
as.simpleTokensMu.Lock()
defer as.simpleTokensMu.Unlock()
if username, ok := as.simpleTokens[t]; ok {
plog.Infof("deleting token %s for user %s", t, username)
delete(as.simpleTokens, t)
}
}
tokenProvider TokenProvider
}
func (as *authStore) AuthEnable() error {
@@ -215,11 +214,11 @@ func (as *authStore) AuthEnable() error {
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
as.enabled = true
as.enable()
as.tokenProvider.enable()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
as.revision = getRevision(tx)
as.setRevision(getRevision(tx))
plog.Noticef("Authentication enabled")
@@ -241,15 +240,7 @@ func (as *authStore) AuthDisable() {
b.ForceCommit()
as.enabled = false
as.simpleTokensMu.Lock()
tk := as.simpleTokenKeeper
as.simpleTokenKeeper = nil
as.simpleTokens = make(map[string]string) // invalidate all tokens
as.simpleTokensMu.Unlock()
if tk != nil {
tk.stop()
}
as.tokenProvider.disable()
plog.Noticef("Authentication disabled")
}
@@ -260,10 +251,7 @@ func (as *authStore) Close() error {
if !as.enabled {
return nil
}
if as.simpleTokenKeeper != nil {
as.simpleTokenKeeper.stop()
as.simpleTokenKeeper = nil
}
as.tokenProvider.disable()
return nil
}
@@ -272,10 +260,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
return nil, ErrAuthNotEnabled
}
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
index := ctx.Value("index").(uint64)
simpleToken := ctx.Value("simpleToken").(string)
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
@@ -285,14 +269,23 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
return nil, ErrAuthFailed
}
token := fmt.Sprintf("%s.%d", simpleToken, index)
as.assignSimpleTokenToUser(username, token)
// Password checking is already performed in the API layer, so we don't need to check for now.
// Staleness of password can be detected with OCC in the API layer, too.
plog.Infof("authorized %s, token is %s", username, token)
token, err := as.tokenProvider.assign(ctx, username, as.Revision())
if err != nil {
return nil, err
}
plog.Debugf("authorized %s, token is %s", username, token)
return &pb.AuthenticateResponse{Token: token}, nil
}
func (as *authStore) CheckPassword(username, password string) (uint64, error) {
if !as.isAuthEnabled() {
return 0, ErrAuthNotEnabled
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
@@ -322,7 +315,7 @@ func (as *authStore) Recover(be backend.Backend) {
}
}
as.revision = getRevision(tx)
as.setRevision(getRevision(tx))
tx.Unlock()
@@ -366,6 +359,11 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
}
func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
if as.enabled && strings.Compare(r.Name, rootUser) == 0 {
plog.Errorf("the user root must not be deleted")
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
@@ -380,7 +378,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
as.commitRevision(tx)
as.invalidateCachedPerm(r.Name)
as.invalidateUser(r.Name)
as.tokenProvider.invalidateUser(r.Name)
plog.Noticef("deleted a user: %s", r.Name)
@@ -416,7 +414,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
as.commitRevision(tx)
as.invalidateCachedPerm(r.Name)
as.invalidateUser(r.Name)
as.tokenProvider.invalidateUser(r.Name)
plog.Noticef("changed a password of a user: %s", r.Name)
@@ -491,6 +489,11 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon
}
func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 {
plog.Errorf("the role root must not be revoked from the user root")
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
@@ -593,17 +596,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
}
func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
// TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles
//
// Assume a case like below:
// create a role r1
// create a user u1 and grant r1 to u1
// delete r1
//
// After this sequence, u1 is still granted the role r1. So if an admin creates a new role
// with the name r1, the new r1 is automatically granted to u1.
// In some cases this would be confusing, so we need to provide an option for deleting the
// grant relation from all users.
if as.enabled && strings.Compare(r.Role, rootRole) == 0 {
plog.Errorf("the role root must not be deleted")
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx()
tx.Lock()
@@ -616,6 +612,28 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
delRole(tx, r.Role)
users := getAllUsers(tx)
for _, user := range users {
updatedUser := &authpb.User{
Name: user.Name,
Password: user.Password,
}
for _, role := range user.Roles {
if strings.Compare(role, r.Role) != 0 {
updatedUser.Roles = append(updatedUser.Roles, role)
}
}
if len(updatedUser.Roles) == len(user.Roles) {
continue
}
putUser(tx, updatedUser)
as.invalidateCachedPerm(string(user.Name))
}
as.commitRevision(tx)
plog.Noticef("deleted role %s", r.Role)
@@ -645,15 +663,8 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
return &pb.AuthRoleAddResponse{}, nil
}
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
// same as '(t *tokenSimple) info' in v3.2+
as.simpleTokensMu.Lock()
username, ok := as.simpleTokens[token]
if ok && as.simpleTokenKeeper != nil {
as.simpleTokenKeeper.resetSimpleToken(token)
}
as.simpleTokensMu.Unlock()
return &AuthInfo{Username: username, Revision: as.revision}, ok
func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
return as.tokenProvider.info(ctx, token, as.Revision())
}
type permSlice []*authpb.Permission
@@ -723,7 +734,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
return ErrUserEmpty
}
if revision < as.revision {
if revision < as.Revision() {
return ErrAuthOldRevision
}
@@ -886,7 +897,7 @@ func (as *authStore) isAuthEnabled() bool {
return as.enabled
}
func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
tx := be.BatchTx()
tx.Lock()
@@ -904,18 +915,17 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{})
as := &authStore{
be: be,
simpleTokens: make(map[string]string),
revision: getRevision(tx),
indexWaiter: indexWaiter,
enabled: enabled,
rangePermCache: make(map[string]*unifiedRangePermissions),
tokenProvider: tp,
}
if enabled {
as.enable()
as.tokenProvider.enable()
}
if as.revision == 0 {
if as.Revision() == 0 {
as.commitRevision(tx)
}
@@ -935,9 +945,9 @@ func hasRootRole(u *authpb.User) bool {
}
func (as *authStore) commitRevision(tx backend.BatchTx) {
as.revision++
atomic.AddUint64(&as.revision, 1)
revBytes := make([]byte, revBytesLen)
binary.BigEndian.PutUint64(revBytes, as.revision)
binary.BigEndian.PutUint64(revBytes, as.Revision())
tx.UnsafePut(authBucketName, revisionKey, revBytes)
}
@@ -951,31 +961,38 @@ func getRevision(tx backend.BatchTx) uint64 {
return binary.BigEndian.Uint64(vs[0])
}
func (as *authStore) Revision() uint64 {
return as.revision
func (as *authStore) setRevision(rev uint64) {
atomic.StoreUint64(&as.revision, rev)
}
func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
splitted := strings.Split(token, ".")
if len(splitted) != 2 {
return false
}
index, err := strconv.Atoi(splitted[1])
if err != nil {
return false
func (as *authStore) Revision() uint64 {
return atomic.LoadUint64(&as.revision)
}
func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
peer, ok := peer.FromContext(ctx)
if !ok || peer == nil || peer.AuthInfo == nil {
return nil
}
select {
case <-as.indexWaiter(uint64(index)):
return true
case <-ctx.Done():
tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
for _, chains := range tlsInfo.State.VerifiedChains {
for _, chain := range chains {
cn := chain.Subject.CommonName
plog.Debugf("found common name %s", cn)
return &AuthInfo{
Username: cn,
Revision: as.Revision(),
}
}
}
return false
return nil
}
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, nil
}
@@ -986,14 +1003,57 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
}
token := ts[0]
if !as.isValidSimpleToken(token, ctx) {
return nil, ErrInvalidAuthToken
}
authInfo, uok := as.AuthInfoFromToken(token)
authInfo, uok := as.authInfoFromToken(ctx, token)
if !uok {
plog.Warningf("invalid auth token: %s", token)
return nil, ErrInvalidAuthToken
}
return authInfo, nil
}
func (as *authStore) GenTokenPrefix() (string, error) {
return as.tokenProvider.genTokenPrefix()
}
func decomposeOpts(optstr string) (string, map[string]string, error) {
opts := strings.Split(optstr, ",")
tokenType := opts[0]
typeSpecificOpts := make(map[string]string)
for i := 1; i < len(opts); i++ {
pair := strings.Split(opts[i], "=")
if len(pair) != 2 {
plog.Errorf("invalid token specific option: %s", optstr)
return "", nil, ErrInvalidAuthOpts
}
if _, ok := typeSpecificOpts[pair[0]]; ok {
plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
return "", nil, ErrInvalidAuthOpts
}
typeSpecificOpts[pair[0]] = pair[1]
}
return tokenType, typeSpecificOpts, nil
}
func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
if err != nil {
return nil, ErrInvalidAuthOpts
}
switch tokenType {
case "simple":
plog.Warningf("simple token is not cryptographically signed")
return newTokenProviderSimple(indexWaiter), nil
case "jwt":
return newTokenProviderJWT(typeSpecificOpts)
default:
plog.Errorf("unknown token type: %s", tokenType)
return nil, ErrInvalidAuthOpts
}
}
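
A hedged sketch of the option strings NewTokenProvider accepts (the JWT key paths are hypothetical):

```go
// Stub indexWaiter for illustration: treat every raft index as applied.
indexWaiter := func(index uint64) <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}

// "simple" takes no type-specific options.
simpleTP, serr := NewTokenProvider("simple", indexWaiter)

// "jwt" takes k=v options parsed by decomposeOpts and prepareOpts.
jwtTP, jerr := NewTokenProvider(
	"jwt,sign-method=RS256,pub-key=/etc/etcd/jwt.pub,priv-key=/etc/etcd/jwt.key", indexWaiter)

// Any other type, or a malformed or duplicated k=v pair, yields ErrInvalidAuthOpts.
_, _, _, _ = simpleTP, serr, jwtTP, jerr
```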


@@ -14,14 +14,15 @@ go_library(
"keys.generated.go",
"keys.go",
"members.go",
"srv.go",
"util.go",
],
importpath = "github.com/coreos/etcd/client",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/srv:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
"//vendor/github.com/coreos/etcd/version:go_default_library",
"//vendor/github.com/ugorji/go/codec:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],


@@ -15,6 +15,7 @@
package client
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@@ -27,6 +28,8 @@ import (
"sync"
"time"
"github.com/coreos/etcd/version"
"golang.org/x/net/context"
)
@@ -201,6 +204,9 @@ type Client interface {
// returned
SetEndpoints(eps []string) error
// GetVersion retrieves the current etcd server and cluster version
GetVersion(ctx context.Context) (*version.Versions, error)
httpClient
}
@@ -366,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
} else if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
@@ -379,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
err = cerr.Errors[0]
}
if err != nil {
if !isOneShot {
continue
}
continue
c.Lock()
c.pinned = (k + 1) % leps
c.Unlock()
return nil, nil, err
}
if k != pinned {
c.Lock()
@@ -477,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration
}
}
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
act := &getAction{Prefix: "/version"}
resp, body, err := c.Do(ctx, act)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK:
if len(body) == 0 {
return nil, ErrEmptyBody
}
var vresp version.Versions
if err := json.Unmarshal(body, &vresp); err != nil {
return nil, ErrInvalidJSON
}
return &vresp, nil
default:
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return nil, ErrInvalidJSON
}
return nil, etcdErr
}
}
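
A minimal usage sketch for the new method, assuming c is an existing client.Client and the vendored version.Versions fields Server and Cluster:

```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
vers, err := c.GetVersion(ctx) // c is an existing client.Client
cancel()
if err != nil {
	// handle error!
}
fmt.Printf("server=%s cluster=%s\n", vers.Server, vers.Cluster)
```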
type roundTripResponse struct {
resp *http.Response
err error


@@ -14,8 +14,27 @@
package client
import (
"github.com/coreos/etcd/pkg/srv"
)
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
func (d *srvDiscover) Discover(domain string) ([]string, error) {
srvs, err := srv.GetClient("etcd-client", domain)
if err != nil {
return nil, err
}
return srvs.Endpoints, nil
}
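
Usage is unchanged by the rewrite; a minimal sketch with a hypothetical domain that publishes etcd-client SRV records:

```go
d := client.NewSRVDiscover()
eps, err := d.Discover("example.com") // hypothetical domain
if err != nil {
	// handle error!
}
fmt.Println(eps) // e.g. [http://etcd0.example.com:2379 ...]
```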


@@ -1,65 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"net"
"net/url"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
)
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
// Discover looks up the etcd servers for the domain.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
var urls []*url.URL
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
return nil
}
errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return endpoints, nil
}


@@ -4,18 +4,19 @@ go_library(
name = "go_default_library",
srcs = [
"auth.go",
"balancer.go",
"client.go",
"cluster.go",
"compact_op.go",
"compare.go",
"config.go",
"doc.go",
"health_balancer.go",
"kv.go",
"lease.go",
"logger.go",
"maintenance.go",
"op.go",
"ready_wait.go",
"retry.go",
"sort.go",
"txn.go",
@@ -28,15 +29,15 @@ go_library(
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/credentials:go_default_library",
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
"//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library",
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
"//vendor/google.golang.org/grpc/metadata:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)
@@ -49,7 +50,12 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,6 +1,6 @@
# etcd/clientv3
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.
@@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs:
```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := kvc.Put(ctx, "sample_key", "sample_value")
resp, err := cli.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
// handle error!
@@ -57,7 +57,7 @@ etcd client returns 2 types of errors:
Here is the example code to handle client errors:
```go
resp, err := kvc.Put(ctx, "", "")
resp, err := cli.Put(ctx, "", "")
if err != nil {
switch err {
case context.Canceled:
@@ -76,6 +76,10 @@ if err != nil {
The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
## Namespacing
The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
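For instance, a minimal sketch (the "my-app/" prefix is arbitrary):

```go
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
if err != nil {
	// handle error!
}
// Every key this client reads or writes is now transparently prefixed.
cli.KV = namespace.NewKV(cli.KV, "my-app/")
cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-app/")
cli.Lease = namespace.NewLease(cli.Lease, "my-app/")
```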
## Examples
More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).


@@ -20,6 +20,7 @@ import (
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@@ -100,28 +101,20 @@ type Auth interface {
}
type auth struct {
c *Client
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func NewAuth(c *Client) Auth {
conn := c.ActiveConnection()
return &auth{
conn: c.ActiveConnection(),
remote: pb.NewAuthClient(conn),
c: c,
}
return &auth{remote: RetryAuthClient(c)}
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
@@ -146,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
@@ -176,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
@@ -209,7 +202,7 @@ type authenticator struct {
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}


@@ -1,239 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upEps holds the current endpoints that have an active connection
upEps map[string]struct{}
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
upc chan struct{}
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
notifyCh <- addrs
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upEps: make(map[string]struct{}),
upc: make(chan struct{}),
host2ep: getHost2ep(eps),
}
return sb
}
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) getEndpoint(host string) string {
b.mu.Lock()
defer b.mu.Unlock()
return b.host2ep[host]
}
func getHost2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
func (b *simpleBalancer) updateAddrs(eps []string) {
np := getHost2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.host2ep)
for k, v := range np {
if b.host2ep[k] != v {
match = false
break
}
}
if match {
// same endpoints, so no need to update address
return
}
b.host2ep = np
addrs := make([]grpc.Address, 0, len(eps))
for i := range eps {
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
}
b.addrs = addrs
b.notifyCh <- addrs
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at the application layer; otherwise our simpleBalancer
// might panic since b.upc is closed.
if b.closed {
return func(err error) {}
}
if len(b.upEps) == 0 {
// notify waiting Get()s and pin first connected address
close(b.upc)
b.pinAddr = addr.Addr
}
b.upEps[addr.Addr] = struct{}{}
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
delete(b.upEps, addr.Addr)
if len(b.upEps) == 0 && b.pinAddr != "" {
b.upc = make(chan struct{})
} else if b.pinAddr == addr.Addr {
// choose new random up endpoint
for k := range b.upEps {
b.pinAddr = k
break
}
}
b.mu.Unlock()
}
}
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var addr string
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed := b.closed
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps == 0 {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if addr == "" {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps > 0 {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
// In case gRPC calls close twice. TODO: remove this check
// when we are sure that gRPC won't call close twice.
if b.closed {
return nil
}
b.closed = true
close(b.notifyCh)
// terminate all waiting Get()s
b.pinAddr = ""
if len(b.upEps) == 0 {
close(b.upc)
}
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}


@@ -20,22 +20,25 @@ import (
"fmt"
"net"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
)
// Client provides and manages an etcd v3 client session.
@@ -47,19 +50,20 @@ type Client struct {
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
balancer *simpleBalancer
retryWrapper retryRpcFunc
retryAuthWrapper retryRpcFunc
conn *grpc.ClientConn
dialerrc chan error
cfg Config
creds *credentials.TransportCredentials
balancer *healthBalancer
mu sync.Mutex
ctx context.Context
cancel context.CancelFunc
// Username is a username for authentication
// Username is a user name for authentication.
Username string
// Password is a password for authentication
// Password is a password for authentication.
Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential
@@ -74,26 +78,28 @@ func New(cfg Config) (*Client, error) {
return newClient(&cfg)
}
// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel}
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromConfigFile creates a new etcdv3 client from a configuration file.
func NewFromConfigFile(path string) (*Client, error) {
cfg, err := configFromFile(path)
if err != nil {
return nil, err
}
return New(*cfg)
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
c.Watcher.Close()
c.Lease.Close()
return toErr(c.ctx, c.conn.Close())
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
// Ctx is a context for "out of band" messages (e.g., for sending
@@ -111,8 +117,23 @@ func (c *Client) Endpoints() (eps []string) {
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
c.cfg.Endpoints = eps
c.balancer.updateAddrs(eps)
c.mu.Unlock()
c.balancer.updateAddrs(eps...)
// updating notifyCh can trigger new connections,
// need to update addrs if all connections are down
// or addrs does not include pinAddr.
c.balancer.mu.RLock()
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
c.balancer.mu.RUnlock()
if update {
select {
case c.balancer.updateAddrsC <- notifyNext:
case <-c.balancer.stopc:
}
}
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@@ -139,8 +160,10 @@ func (c *Client) autoSync() {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err)
}
}
@@ -169,7 +192,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
host = endpoint
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
return proto, host, scheme
}
scheme = url.Scheme
@@ -177,12 +200,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix":
case "unix", "unixs":
proto = "unix"
host = url.Host + url.Path
default:
proto, host = "", ""
}
return
return proto, host, scheme
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
@@ -191,7 +215,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
case "unix":
case "http":
creds = nil
case "https":
case "https", "unixs":
if creds != nil {
break
}
@@ -201,7 +225,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
default:
creds = nil
}
return
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication
@@ -209,10 +233,22 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
}
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
if host == "" && endpoint != "" {
// dialing an endpoint not in the balancer; use
// endpoint passed into dial
proto, host, _ = parseEndpoint(endpoint)
}
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
@@ -222,7 +258,14 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
default:
}
dialer := &net.Dialer{Timeout: t}
return dialer.DialContext(c.ctx, proto, host)
conn, err := dialer.DialContext(c.ctx, proto, host)
if err != nil {
select {
case c.dialerrc <- err:
default:
}
}
return conn, err
}
opts = append(opts, grpc.WithDialer(f))
@@ -288,21 +331,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
defer cancel()
ctx = cctx
}
if err := c.getToken(ctx); err != nil {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = grpc.ErrClientConnTimeout
}
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
err := c.getToken(ctx)
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = context.DeadlineExceeded
}
return nil, err
}
} else {
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
}
}
// add metrics options
opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
opts = append(opts, c.cfg.DialOptions...)
conn, err := grpc.Dial(host, opts...)
conn, err := grpc.DialContext(c.ctx, host, opts...)
if err != nil {
return nil, err
}
@@ -313,7 +358,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
return metadata.NewOutgoingContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
@@ -327,20 +372,31 @@ func newClient(cfg *Config) (*Client, error) {
}
// use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO())
baseCtx := context.TODO()
if cfg.Context != nil {
baseCtx = cfg.Context
}
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
conn: nil,
dialerrc: make(chan error, 1),
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
client.balancer = newSimpleBalancer(cfg.Endpoints)
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
return grpcHealthCheck(client, ep)
})
// use Endpoints[0] so that, for https:// without any TLS config given,
// grpc will assume the certificate's server name is the endpoint host.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
client.cancel()
@@ -348,24 +404,27 @@ func newClient(cfg *Config) (*Client, error) {
return nil, err
}
client.conn = conn
client.retryWrapper = client.newRetryWrapper()
client.retryAuthWrapper = client.newAuthRetryWrapper()
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.readyc:
case <-client.balancer.ready():
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
err := context.DeadlineExceeded
select {
case err = <-client.dialerrc:
default:
}
client.cancel()
client.balancer.Close()
conn.Close()
return nil, grpc.ErrClientConnTimeout
return nil, err
}
}
@@ -376,10 +435,57 @@ func newClient(cfg *Config) (*Client, error) {
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.RejectOldCluster {
if err := client.checkVersion(); err != nil {
client.Close()
return nil, err
}
}
go client.autoSync()
return client, nil
}
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
errc := make(chan error, len(c.cfg.Endpoints))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
wg.Add(len(c.cfg.Endpoints))
for _, ep := range c.cfg.Endpoints {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
resp, rerr := c.Status(ctx, e)
if rerr != nil {
errc <- rerr
return
}
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
maj, _ = strconv.Atoi(vs[0])
min, rerr = strconv.Atoi(vs[1])
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
}
errc <- rerr
}(ep)
}
// wait for success
for i := 0; i < len(c.cfg.Endpoints); i++ {
if err = <-errc; err == nil {
break
}
}
cancel()
wg.Wait()
return err
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
@@ -392,14 +498,14 @@ func isHaltErr(ctx context.Context, err error) bool {
if err == nil {
return false
}
code := grpc.Code(err)
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return code != codes.Unavailable && code != codes.Internal
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
func toErr(ctx context.Context, err error) error {
@@ -410,7 +516,8 @@ func toErr(ctx context.Context, err error) error {
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
code := grpc.Code(err)
ev, _ := status.FromError(err)
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
@@ -419,9 +526,16 @@ func toErr(ctx context.Context, err error) error {
err = ctx.Err()
}
case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing
}
return err
}
func canceledByCaller(stopCtx context.Context, err error) bool {
if stopCtx.Err() == nil || err == nil {
return false
}
return err == context.Canceled || err == context.DeadlineExceeded
}
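
The dial and version-check changes above are driven by clientv3.Config fields; a hedged sketch exercising the keepalive and RejectOldCluster paths (the endpoint is hypothetical):

```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:            []string{"localhost:2379"},
	DialTimeout:          5 * time.Second,
	DialKeepAliveTime:    30 * time.Second, // enables grpc.WithKeepaliveParams above
	DialKeepAliveTimeout: 10 * time.Second,
	RejectOldCluster:     true, // runs checkVersion; ErrOldCluster below 3.2
})
if err != nil {
	// handle error! (may be ErrOldCluster)
}
defer cli.Close()
```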


@@ -16,8 +16,8 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@@ -50,53 +50,43 @@ func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)}
}
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
return &cluster{remote: remote}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r)
if err == nil {
return (*MemberAddResponse)(resp), nil
}
if isHaltErr(ctx, err) {
if err != nil {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
return (*MemberAddResponse)(resp), nil
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r)
if err == nil {
return (*MemberRemoveResponse)(resp), nil
}
if isHaltErr(ctx, err) {
if err != nil {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
return (*MemberRemoveResponse)(resp), nil
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
if err == nil {
return (*MemberListResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
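For orientation, a minimal sketch of driving the simplified cluster API above from application code; the endpoint address and error handling here are assumptions for illustration, not taken from this diff:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// assumed endpoint; any reachable etcd member works
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	resp, err := cli.MemberList(ctx) // retried internally; errors mapped via toErr
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.ClientURLs)
	}
}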

View File

@@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
// WithCompactPhysical makes Compact wait until all compacted entries are
// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
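A brief hedged sketch of the option in use; the client variable, target revision, and timeout are assumed for illustration:

package main

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func compactPhysically(cli *clientv3.Client, rev int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// with the physical option, Compact blocks until compacted entries
	// are actually removed from the backend database
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}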

View File

@@ -82,6 +82,24 @@ func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
// KeyBytes returns the byte slice holding the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
// WithKeyBytes sets the byte slice for the comparison key.
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
// ValueBytes returns the byte slice holding the comparison value, if any.
func (cmp *Cmp) ValueBytes() []byte {
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
return tu.Value
}
return nil
}
// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
@@ -91,3 +109,12 @@ func mustInt64(val interface{}) int64 {
}
panic("bad value")
}
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
}
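To make the new Cmp accessors concrete, a hedged sketch that retargets an existing comparison at another key inside a transaction; the key names and the client variable are assumptions:

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func acquireIfAbsent(cli *clientv3.Client) error {
	cmp := clientv3.Compare(clientv3.CreateRevision("job/lock"), "=", 0)
	// retarget the same comparison at another key via the new accessors
	cmp.WithKeyBytes([]byte("job/lock2"))
	resp, err := cli.Txn(context.Background()).
		If(cmp).
		Then(clientv3.OpPut("job/lock2", "owner-1")).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		log.Println("job/lock2 already exists")
	}
	return nil
}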

View File

@@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"election.go",
"key.go",
"mutex.go",
"session.go",
"stm.go",
],
importpath = "github.com/coreos/etcd/clientv3/concurrency",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,17 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency

View File

@@ -0,0 +1,246 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"errors"
"fmt"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
)
var (
ErrElectionNotLeader = errors.New("election: not leader")
ErrElectionNoLeader = errors.New("election: no leader")
)
type Election struct {
session *Session
keyPrefix string
leaderKey string
leaderRev int64
leaderSession *Session
hdr *pb.ResponseHeader
}
// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
return &Election{session: s, keyPrefix: pfx + "/"}
}
// ResumeElection initializes an election with a known leader.
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
return &Election{
session: s,
leaderKey: leaderKey,
leaderRev: leaderRev,
leaderSession: s,
}
}
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
s := e.session
client := e.session.Client()
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
txn = txn.Else(v3.OpGet(k))
resp, err := txn.Commit()
if err != nil {
return err
}
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
if !resp.Succeeded {
kv := resp.Responses[0].GetResponseRange().Kvs[0]
e.leaderRev = kv.CreateRevision
if string(kv.Value) != val {
if err = e.Proclaim(ctx, val); err != nil {
e.Resign(ctx)
return err
}
}
}
_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
if err != nil {
// clean up in case of context cancel
select {
case <-ctx.Done():
e.Resign(client.Ctx())
default:
e.leaderSession = nil
}
return err
}
e.hdr = resp.Header
return nil
}
// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
if e.leaderSession == nil {
return ErrElectionNotLeader
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
txn := client.Txn(ctx).If(cmp)
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
tresp, terr := txn.Commit()
if terr != nil {
return terr
}
if !tresp.Succeeded {
e.leaderKey = ""
return ErrElectionNotLeader
}
e.hdr = tresp.Header
return nil
}
// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
if e.leaderSession == nil {
return nil
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
if err == nil {
e.hdr = resp.Header
}
e.leaderKey = ""
e.leaderSession = nil
return err
}
// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
client := e.session.Client()
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return nil, err
} else if len(resp.Kvs) == 0 {
// no leader currently elected
return nil, ErrElectionNoLeader
}
return resp, nil
}
// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values for every currently elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
retc := make(chan v3.GetResponse)
go e.observe(ctx, retc)
return retc
}
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
client := e.session.Client()
defer close(ch)
for {
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return
}
var kv *mvccpb.KeyValue
var hdr *pb.ResponseHeader
if len(resp.Kvs) == 0 {
cctx, cancel := context.WithCancel(ctx)
// wait for first key put on prefix
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
wch := client.Watch(cctx, e.keyPrefix, opts...)
for kv == nil {
wr, ok := <-wch
if !ok || wr.Err() != nil {
cancel()
return
}
// only accept puts; a delete will make observe() spin
for _, ev := range wr.Events {
if ev.Type == mvccpb.PUT {
hdr, kv = &wr.Header, ev.Kv
// the watch response may cover multiple revs; hdr.rev is the last rev,
// so set it to the kv's ModRevision in case the batch has multiple Puts
hdr.Revision = kv.ModRevision
break
}
}
}
cancel()
} else {
hdr, kv = resp.Header, resp.Kvs[0]
}
select {
case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
case <-ctx.Done():
return
}
cctx, cancel := context.WithCancel(ctx)
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
keyDeleted := false
for !keyDeleted {
wr, ok := <-wch
if !ok {
cancel()
return
}
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
keyDeleted = true
break
}
resp.Header = &wr.Header
resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
select {
case ch <- *resp:
case <-cctx.Done():
cancel()
return
}
}
}
cancel()
}
}
// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }
// Rev returns the leader key's creation revision, if elected.
func (e *Election) Rev() int64 { return e.leaderRev }
// Header is the response header from the last successful election proposal.
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
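A minimal usage sketch of the election API above, assuming an already-dialed client; the key prefix and candidate value are illustrative only:

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func runForLeader(cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli) // session lease backs the candidacy
	if err != nil {
		return err
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election")
	ctx := context.Background()
	if err := e.Campaign(ctx, "candidate-1"); err != nil { // blocks until elected
		return err
	}
	log.Printf("elected with key %s", e.Key())
	// ... act as leader ...
	return e.Resign(ctx) // let the next candidate win
}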

View File

@@ -0,0 +1,66 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"fmt"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
)
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
var wr v3.WatchResponse
wch := client.Watch(cctx, key, v3.WithRev(rev))
for wr = range wch {
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
return nil
}
}
}
if err := wr.Err(); err != nil {
return err
}
if err := ctx.Err(); err != nil {
return err
}
return fmt.Errorf("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matching the prefix and with a
// create revision no greater than maxCreateRev are deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
for {
resp, err := client.Get(ctx, pfx, getOpts...)
if err != nil {
return nil, err
}
if len(resp.Kvs) == 0 {
return resp.Header, nil
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
return nil, err
}
}
}

View File

@@ -0,0 +1,119 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"fmt"
"sync"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
s *Session
pfx string
myKey string
myRev int64
hdr *pb.ResponseHeader
}
func NewMutex(s *Session, pfx string) *Mutex {
return &Mutex{s, pfx + "/", "", -1, nil}
}
// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
s := m.s
client := m.s.Client()
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
// put self in lock waiters via myKey; oldest waiter holds lock
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return err
}
m.myRev = resp.Header.Revision
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
// if there is no key on the prefix, or our key holds the minimum create revision, the lock is already held
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
// wait for deletion revisions prior to myKey
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
// release lock key if cancelled
select {
case <-ctx.Done():
m.Unlock(client.Ctx())
default:
m.hdr = hdr
}
return werr
}
func (m *Mutex) Unlock(ctx context.Context) error {
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return nil
}
func (m *Mutex) IsOwner() v3.Cmp {
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}
func (m *Mutex) Key() string { return m.myKey }
// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
client := lm.s.Client()
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
}
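A minimal sketch of the mutex in use, assuming an existing client; the lock prefix and critical-section callback are illustrative:

package main

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func withLock(cli *clientv3.Client, critical func() error) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock")
	ctx := context.Background()
	if err := m.Lock(ctx); err != nil { // blocks until this session holds the lock
		return err
	}
	defer m.Unlock(ctx)
	return critical() // runs while holding /my-lock
}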

View File

@@ -0,0 +1,142 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"time"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
const defaultSessionTTL = 60
// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
client *v3.Client
opts *sessionOptions
id v3.LeaseID
cancel context.CancelFunc
donec <-chan struct{}
}
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
for _, opt := range opts {
opt(ops)
}
id := ops.leaseID
if id == v3.NoLease {
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
if err != nil {
return nil, err
}
id = v3.LeaseID(resp.ID)
}
ctx, cancel := context.WithCancel(ops.ctx)
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
cancel()
return nil, err
}
donec := make(chan struct{})
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
// keep the lease alive until client error or cancelled context
go func() {
defer close(donec)
for range keepAlive {
// eat messages until keep alive channel closes
}
}()
return s, nil
}
// Client is the etcd client that is attached to the session.
func (s *Session) Client() *v3.Client {
return s.client
}
// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }
// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }
// Orphan ends the refresh for the session lease. This is useful
// in case the state of the client connection is indeterminate (revoke
// would fail) or when transferring lease ownership.
func (s *Session) Orphan() {
s.cancel()
<-s.donec
}
// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
s.Orphan()
// if revoke takes longer than the ttl, lease is expired anyway
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
_, err := s.client.Revoke(ctx, s.id)
cancel()
return err
}
type sessionOptions struct {
ttl int
leaseID v3.LeaseID
ctx context.Context
}
// SessionOption configures Session.
type SessionOption func(*sessionOptions)
// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
return func(so *sessionOptions) {
if ttl > 0 {
so.ttl = ttl
}
}
}
// WithLease specifies the existing leaseID to be used for the session.
// This is useful in a process-restart scenario, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
return func(so *sessionOptions) {
so.leaseID = leaseID
}
}
// WithContext assigns a context to the session instead of defaulting to
// using the client context. This is useful for canceling NewSession and
// Close operations immediately without having to close the client. If the
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
return func(so *sessionOptions) {
so.ctx = ctx
}
}
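A short hedged sketch of creating a session and reacting to lease loss; the TTL value and logging are illustrative choices:

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func watchLiveness(cli *clientv3.Client) (*concurrency.Session, error) {
	// 15s TTL: the lease expires if keepalives stop for that long
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
	if err != nil {
		return nil, err
	}
	go func() {
		<-s.Done() // closes when the lease is orphaned or expires
		log.Println("session lease is no longer being refreshed")
	}()
	return s, nil
}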

View File

@@ -0,0 +1,388 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"math"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
// STM is an interface for software transactional memory.
type STM interface {
// Get returns the value for a key and inserts the key in the txn's read set.
// If Get fails, it aborts the transaction with an error, never returning.
Get(key ...string) string
// Put adds a value for a key to the write set.
Put(key, val string, opts ...v3.OpOption)
// Rev returns the revision of a key in the read set.
Rev(key string) int64
// Del deletes a key.
Del(key string)
// commit attempts to apply the txn's changes to the server.
commit() *v3.TxnResponse
reset()
}
// Isolation is an enumeration of transactional isolation levels which
// describes how transactions should interfere and conflict.
type Isolation int
const (
// SerializableSnapshot provides serializable isolation and also checks
// for write conflicts.
SerializableSnapshot Isolation = iota
// Serializable reads within the same transaction attempt return data
// from the revision of the first read.
Serializable
// RepeatableReads reads within the same transaction attempt always
// return the same data.
RepeatableReads
// ReadCommitted reads keys from any committed revision.
ReadCommitted
)
// stmError safely passes STM errors through panic to the STM error channel.
type stmError struct{ err error }
type stmOptions struct {
iso Isolation
ctx context.Context
prefetch []string
}
type stmOption func(*stmOptions)
// WithIsolation specifies the transaction isolation level.
func WithIsolation(lvl Isolation) stmOption {
return func(so *stmOptions) { so.iso = lvl }
}
// WithAbortContext specifies the context for permanently aborting the transaction.
func WithAbortContext(ctx context.Context) stmOption {
return func(so *stmOptions) { so.ctx = ctx }
}
// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
// If an STM transaction will unconditionally fetch a set of keys, prefetching
// those keys will save the round-trip cost from requesting each key one by one
// with Get().
func WithPrefetch(keys ...string) stmOption {
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
}
// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
opts := &stmOptions{ctx: c.Ctx()}
for _, f := range so {
f(opts)
}
if len(opts.prefetch) != 0 {
f := apply
apply = func(s STM) error {
s.Get(opts.prefetch...)
return f(s)
}
}
return runSTM(mkSTM(c, opts), apply)
}
func mkSTM(c *v3.Client, opts *stmOptions) STM {
switch opts.iso {
case SerializableSnapshot:
s := &stmSerializable{
stm: stm{client: c, ctx: opts.ctx},
prefetch: make(map[string]*v3.GetResponse),
}
s.conflicts = func() []v3.Cmp {
return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
}
return s
case Serializable:
s := &stmSerializable{
stm: stm{client: c, ctx: opts.ctx},
prefetch: make(map[string]*v3.GetResponse),
}
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
return s
case RepeatableReads:
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
return s
case ReadCommitted:
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
s.conflicts = func() []v3.Cmp { return nil }
return s
default:
panic("unsupported stm")
}
}
type stmResponse struct {
resp *v3.TxnResponse
err error
}
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
outc := make(chan stmResponse, 1)
go func() {
defer func() {
if r := recover(); r != nil {
e, ok := r.(stmError)
if !ok {
// client apply panicked
panic(r)
}
outc <- stmResponse{nil, e.err}
}
}()
var out stmResponse
for {
s.reset()
if out.err = apply(s); out.err != nil {
break
}
if out.resp = s.commit(); out.resp != nil {
break
}
}
outc <- out
}()
r := <-outc
return r.resp, r.err
}
// stm implements repeatable-read software transactional memory over etcd
type stm struct {
client *v3.Client
ctx context.Context
// rset holds read key values and revisions
rset readSet
// wset holds overwritten keys and their values
wset writeSet
// getOpts are the opts used for gets
getOpts []v3.OpOption
// conflicts computes the current conflicts on the txn
conflicts func() []v3.Cmp
}
type stmPut struct {
val string
op v3.Op
}
type readSet map[string]*v3.GetResponse
func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
for i, resp := range txnresp.Responses {
rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
}
}
// first returns the store revision from the first fetch
func (rs readSet) first() int64 {
ret := int64(math.MaxInt64 - 1)
for _, resp := range rs {
if rev := resp.Header.Revision; rev < ret {
ret = rev
}
}
return ret
}
// cmps guards the txn from updates to read set
func (rs readSet) cmps() []v3.Cmp {
cmps := make([]v3.Cmp, 0, len(rs))
for k, rk := range rs {
cmps = append(cmps, isKeyCurrent(k, rk))
}
return cmps
}
type writeSet map[string]stmPut
func (ws writeSet) get(keys ...string) *stmPut {
for _, key := range keys {
if wv, ok := ws[key]; ok {
return &wv
}
}
return nil
}
// cmps returns a cmp list testing no writes have happened past rev
func (ws writeSet) cmps(rev int64) []v3.Cmp {
cmps := make([]v3.Cmp, 0, len(ws))
for key := range ws {
cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
}
return cmps
}
// puts is the list of ops for all pending writes
func (ws writeSet) puts() []v3.Op {
puts := make([]v3.Op, 0, len(ws))
for _, v := range ws {
puts = append(puts, v.op)
}
return puts
}
func (s *stm) Get(keys ...string) string {
if wv := s.wset.get(keys...); wv != nil {
return wv.val
}
return respToValue(s.fetch(keys...))
}
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
}
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
func (s *stm) Rev(key string) int64 {
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
return resp.Kvs[0].ModRevision
}
return 0
}
func (s *stm) commit() *v3.TxnResponse {
txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
return nil
}
func (s *stm) fetch(keys ...string) *v3.GetResponse {
if len(keys) == 0 {
return nil
}
ops := make([]v3.Op, len(keys))
for i, key := range keys {
if resp, ok := s.rset[key]; ok {
return resp
}
ops[i] = v3.OpGet(key, s.getOpts...)
}
txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
if err != nil {
panic(stmError{err})
}
s.rset.add(keys, txnresp)
return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
}
func (s *stm) reset() {
s.rset = make(map[string]*v3.GetResponse)
s.wset = make(map[string]stmPut)
}
type stmSerializable struct {
stm
prefetch map[string]*v3.GetResponse
}
func (s *stmSerializable) Get(keys ...string) string {
if wv := s.wset.get(keys...); wv != nil {
return wv.val
}
firstRead := len(s.rset) == 0
for _, key := range keys {
if resp, ok := s.prefetch[key]; ok {
delete(s.prefetch, key)
s.rset[key] = resp
}
}
resp := s.stm.fetch(keys...)
if firstRead {
// txn's base revision is defined by the first read
s.getOpts = []v3.OpOption{
v3.WithRev(resp.Header.Revision),
v3.WithSerializable(),
}
}
return respToValue(resp)
}
func (s *stmSerializable) Rev(key string) int64 {
s.Get(key)
return s.stm.Rev(key)
}
func (s *stmSerializable) gets() ([]string, []v3.Op) {
keys := make([]string, 0, len(s.rset))
ops := make([]v3.Op, 0, len(s.rset))
for k := range s.rset {
keys = append(keys, k)
ops = append(ops, v3.OpGet(k))
}
return keys, ops
}
func (s *stmSerializable) commit() *v3.TxnResponse {
keys, getops := s.gets()
txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
// use Else to prefetch keys in case of conflict to save a round trip
txnresp, err := txn.Else(getops...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
// load prefetch with Else data
s.rset.add(keys, txnresp)
s.prefetch = s.rset
s.getOpts = nil
return nil
}
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
if len(r.Kvs) != 0 {
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
}
return v3.Compare(v3.ModRevision(k), "=", 0)
}
func respToValue(resp *v3.GetResponse) string {
if resp == nil || len(resp.Kvs) == 0 {
return ""
}
return string(resp.Kvs[0].Value)
}
// NewSTMRepeatable is deprecated.
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
}
// NewSTMSerializable is deprecated.
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
}
// NewSTMReadCommitted is deprecated.
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
}
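A hedged sketch of the STM API above: an atomic swap of two keys, retried on conflict under the default SerializableSnapshot isolation; the key names are assumptions:

package main

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func swapAccounts(cli *clientv3.Client) error {
	exchange := func(stm concurrency.STM) error {
		a, b := stm.Get("accounts/a"), stm.Get("accounts/b") // read set
		stm.Put("accounts/a", b)                             // write set
		stm.Put("accounts/b", a)
		return nil
	}
	// prefetching both keys saves one round trip per Get on the first attempt
	_, err := concurrency.NewSTM(cli, exchange,
		concurrency.WithPrefetch("accounts/a", "accounts/b"))
	return err
}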

View File

@@ -16,98 +16,47 @@ package clientv3
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// Endpoints is a list of URLs.
Endpoints []string `json:"endpoints"`
// AutoSyncInterval is the interval to update endpoints with its latest members.
// 0 disables auto-sync. By default auto-sync is disabled.
AutoSyncInterval time.Duration
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
DialTimeout time.Duration `json:"dial-timeout"`
// DialKeepAliveTime is the time after which the client pings the server to see if
// the transport is alive.
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
// keep-alive probe. If the response is not received in this time, the connection is closed.
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Username is a username for authentication
Username string
// Username is a user name for authentication.
Username string `json:"username"`
// Password is a password for authentication
Password string
}
type yamlConfig struct {
Endpoints []string `json:"endpoints"`
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
Certfile string `json:"cert-file"`
Keyfile string `json:"key-file"`
CAfile string `json:"ca-file"`
}
func configFromFile(fpath string) (*Config, error) {
b, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
yc := &yamlConfig{}
err = yaml.Unmarshal(b, yc)
if err != nil {
return nil, err
}
cfg := &Config{
Endpoints: yc.Endpoints,
AutoSyncInterval: yc.AutoSyncInterval,
DialTimeout: yc.DialTimeout,
}
if yc.InsecureTransport {
cfg.TLS = nil
return cfg, nil
}
var (
cert *tls.Certificate
cp *x509.CertPool
)
if yc.Certfile != "" && yc.Keyfile != "" {
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
if err != nil {
return nil, err
}
}
if yc.CAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
if err != nil {
return nil, err
}
}
tlscfg := &tls.Config{
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
RootCAs: cp,
}
if cert != nil {
tlscfg.Certificates = []tls.Certificate{*cert}
}
cfg.TLS = tlscfg
return cfg, nil
// Password is a password for authentication.
Password string `json:"password"`
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
RejectOldCluster bool `json:"reject-old-cluster"`
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
DialOptions []grpc.DialOption
// Context is the default client context; it can be used to cancel grpc dial out and
// other operations that do not have an explicit context.
Context context.Context
}
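A hedged sketch of filling in the expanded Config, including the new keepalive and RejectOldCluster fields; the endpoint and durations are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func newClientFromConfig() (*clientv3.Client, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"https://10.0.0.1:2379"}, // assumed address
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second, // ping the server every 30s
		DialKeepAliveTimeout: 10 * time.Second, // close if no response within 10s
		RejectOldCluster:     true,             // fail fast on clusters older than 3.2
		Context:              context.Background(),
	})
	if err != nil {
		log.Printf("dial failed: %v", err)
	}
	return cli, err
}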

View File

@@ -28,7 +28,7 @@
// Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")

View File

@@ -0,0 +1,627 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"errors"
"net/url"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
const (
minHealthRetryDuration = 3 * time.Second
unknownService = "unknown service grpc.health.v1.Health"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is false.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
type healthCheckFunc func(ep string) (bool, error)
type notifyMsg int
const (
notifyReset notifyMsg = iota
notifyNext
)
// healthBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type healthBalancer struct {
// addrs are the client's endpoint addresses for grpc
addrs []grpc.Address
// eps holds the raw endpoints from the client
eps []string
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// healthCheck checks an endpoint's health.
healthCheck healthCheckFunc
healthCheckTimeout time.Duration
unhealthyMu sync.RWMutex
unhealthyHostPorts map[string]time.Time
// mu protects all fields below.
mu sync.RWMutex
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
upc chan struct{}
// downc closes when grpc calls down() on pinAddr
downc chan struct{}
// stopc is closed to signal updateNotifyLoop should stop.
stopc chan struct{}
stopOnce sync.Once
wg sync.WaitGroup
// donec closes when all goroutines are exited
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
updateAddrsC chan notifyMsg
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
hostPort2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
notifyCh := make(chan []grpc.Address)
addrs := eps2addrs(eps)
hb := &healthBalancer{
addrs: addrs,
eps: eps,
notifyCh: notifyCh,
readyc: make(chan struct{}),
healthCheck: hc,
unhealthyHostPorts: make(map[string]time.Time),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan notifyMsg),
hostPort2ep: getHostPort2ep(eps),
}
if timeout < minHealthRetryDuration {
timeout = minHealthRetryDuration
}
hb.healthCheckTimeout = timeout
close(hb.downc)
go hb.updateNotifyLoop()
hb.wg.Add(1)
go func() {
defer hb.wg.Done()
hb.updateUnhealthy()
}()
return hb
}
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
func (b *healthBalancer) endpoint(hostPort string) string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.hostPort2ep[hostPort]
}
func (b *healthBalancer) pinned() string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.pinAddr
}
func (b *healthBalancer) hostPortError(hostPort string, err error) {
if b.endpoint(hostPort) == "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
}
return
}
b.unhealthyMu.Lock()
b.unhealthyHostPorts[hostPort] = time.Now()
b.unhealthyMu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
}
}
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
if b.endpoint(hostPort) == "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
}
return
}
b.unhealthyMu.Lock()
delete(b.unhealthyHostPorts, hostPort)
b.unhealthyMu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
}
}
func (b *healthBalancer) countUnhealthy() (count int) {
b.unhealthyMu.RLock()
count = len(b.unhealthyHostPorts)
b.unhealthyMu.RUnlock()
return count
}
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
b.unhealthyMu.RLock()
_, unhealthy = b.unhealthyHostPorts[hostPort]
b.unhealthyMu.RUnlock()
return unhealthy
}
func (b *healthBalancer) cleanupUnhealthy() {
b.unhealthyMu.Lock()
for k, v := range b.unhealthyHostPorts {
if time.Since(v) > b.healthCheckTimeout {
delete(b.unhealthyHostPorts, k)
if logger.V(4) {
logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
}
}
}
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
unhealthyCnt := b.countUnhealthy()
b.mu.RLock()
defer b.mu.RUnlock()
hbAddrs := b.addrs
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
for k := range b.hostPort2ep {
liveHostPorts[k] = struct{}{}
}
return hbAddrs, liveHostPorts
}
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
liveHostPorts := make(map[string]struct{}, len(addrs))
for _, addr := range b.addrs {
if !b.isUnhealthy(addr.Addr) {
addrs = append(addrs, addr)
liveHostPorts[addr.Addr] = struct{}{}
}
}
return addrs, liveHostPorts
}
func (b *healthBalancer) updateUnhealthy() {
for {
select {
case <-time.After(b.healthCheckTimeout):
b.cleanupUnhealthy()
pinned := b.pinned()
if pinned == "" || b.isUnhealthy(pinned) {
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
return
}
}
case <-b.stopc:
return
}
}
}
func (b *healthBalancer) updateAddrs(eps ...string) {
np := getHostPort2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.hostPort2ep)
if match {
for k, v := range np {
if b.hostPort2ep[k] != v {
match = false
break
}
}
}
if match {
// same endpoints, so no need to update address
return
}
b.hostPort2ep = np
b.addrs, b.eps = eps2addrs(eps), eps
b.unhealthyMu.Lock()
b.unhealthyHostPorts = make(map[string]time.Time)
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) next() {
b.mu.RLock()
downc := b.downc
b.mu.RUnlock()
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
}
// wait until disconnect so new RPCs are not issued on old connection
select {
case <-downc:
case <-b.stopc:
}
}
func (b *healthBalancer) updateNotifyLoop() {
defer close(b.donec)
for {
b.mu.RLock()
upc, downc, addr := b.upc, b.downc, b.pinAddr
b.mu.RUnlock()
// downc or upc should be closed
select {
case <-downc:
downc = nil
default:
}
select {
case <-upc:
upc = nil
default:
}
switch {
case downc == nil && upc == nil:
// stale
select {
case <-b.stopc:
return
default:
}
case downc == nil:
b.notifyAddrs(notifyReset)
select {
case <-upc:
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
case upc == nil:
select {
// close connections that are not the pinned address
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
case <-downc:
case <-b.stopc:
return
}
select {
case <-downc:
b.notifyAddrs(notifyReset)
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
}
}
}
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
if msg == notifyNext {
select {
case b.notifyCh <- []grpc.Address{}:
case <-b.stopc:
return
}
}
b.mu.RLock()
pinAddr := b.pinAddr
downc := b.downc
b.mu.RUnlock()
addrs, hostPorts := b.liveAddrs()
var waitDown bool
if pinAddr != "" {
_, ok := hostPorts[pinAddr]
waitDown = !ok
}
select {
case b.notifyCh <- addrs:
if waitDown {
select {
case <-downc:
case <-b.stopc:
}
}
case <-b.stopc:
}
}
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
if !b.mayPin(addr) {
return func(err error) {}
}
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at the application layer. Otherwise, it would panic
// if b.upc is already closed.
if b.closed {
return func(err error) {}
}
// gRPC might call Up on a stale address.
// Prevent updating pinAddr with a stale address.
if !hasAddr(b.addrs, addr.Addr) {
return func(err error) {}
}
if b.pinAddr != "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
}
return func(err error) {}
}
// notify waiting Get()s and pin first connected address
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
if logger.V(4) {
logger.Infof("clientv3/balancer: pin %q", addr.Addr)
}
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
// If connected to a black-holed endpoint or a killed server, the gRPC ping
// timeout will induce a network I/O error and gRPC will retry until success;
// finding a healthy endpoint on retry could take several timeouts and redials.
// To avoid wasting retries, gray-list unhealthy endpoints.
b.hostPortError(addr.Addr, err)
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
}
}
}
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
if b.endpoint(addr.Addr) == "" { // stale host:port
return false
}
b.unhealthyMu.RLock()
unhealthyCnt := len(b.unhealthyHostPorts)
failedTime, bad := b.unhealthyHostPorts[addr.Addr]
b.unhealthyMu.RUnlock()
b.mu.RLock()
skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
b.mu.RUnlock()
if skip || !bad {
return true
}
// prevent an isolated member's endpoint from being infinitely retried, as follows:
// 1. keepalive pings detect a GoAway with http2.ErrCodeEnhanceYourCalm
// 2. balancer 'Up' unpins with "grpc: failed with network I/O error"
// 3. grpc-healthcheck still reports SERVING, thus the balancer retries to pin
// instead, return before the grpc-healthcheck if the endpoint failed within the healthcheck timeout
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
}
return false
}
if ok, _ := b.healthCheck(addr.Addr); ok {
b.removeUnhealthy(addr.Addr, "health check success")
return true
}
b.hostPortError(addr.Addr, errors.New("health check failed"))
return false
}
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
)
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr == "" {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-b.donec:
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr != "" {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *healthBalancer) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove this check
// once we are sure that gRPC won't call close twice.
if b.closed {
b.mu.Unlock()
<-b.donec
return nil
}
b.closed = true
b.stopOnce.Do(func() { close(b.stopc) })
b.pinAddr = ""
// In the following scenario:
// 1. upc is not closed; no pinned address
// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
// 3. client.conn.Close() calls balancer.Close(); closed = true
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
// we must close upc so Get() exits from blocking on upc
select {
case <-b.upc:
default:
// terminate all waiting Get()s
close(b.upc)
}
b.mu.Unlock()
b.wg.Wait()
// wait for updateNotifyLoop to finish
<-b.donec
close(b.notifyCh)
return nil
}
func grpcHealthCheck(client *Client, ep string) (bool, error) {
conn, err := client.dial(ep)
if err != nil {
return false, err
}
defer conn.Close()
cli := healthpb.NewHealthClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
cancel()
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
if s.Message() == unknownService { // etcd < v3.3.0
return true, nil
}
}
return false, err
}
return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
for _, addr := range addrs {
if targetAddr == addr.Addr {
return true
}
}
return false
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}
func eps2addrs(eps []string) []grpc.Address {
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
return addrs
}
func getHostPort2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}

View File

@@ -16,8 +16,8 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@@ -32,7 +32,7 @@ type KV interface {
// Put puts a key-value pair into etcd.
// Note that key,value can be plain bytes array and string is
// an immutable representation of that bytes array.
// To get a string of bytes, do string([]byte(0x10, 0x20)).
// To get a string of bytes, do string([]byte{0x10, 0x20}).
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
// Get retrieves keys.
@@ -51,11 +51,6 @@ type KV interface {
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction.
// Do is useful when declaring operations to be issued at a later time
// whereas Get/Put/Delete are for better suited for when the operation
// should be immediately issued at time of declaration.
// Do applies a single Op on KV without a transaction.
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
@@ -71,11 +66,26 @@ type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
txn *TxnResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
type kv struct {
remote pb.KVClient
@@ -120,35 +130,17 @@ func (kv *kv) Txn(ctx context.Context) Txn {
}
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
resp, err = kv.remote.Range(ctx, op.toRangeRequest())
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
resp, err = kv.remote.Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
@@ -160,8 +152,14 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
case tTxn:
var resp *pb.TxnResponse
resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
if err == nil {
return OpResponse{txn: (*TxnResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
return OpResponse{}, err
return OpResponse{}, toErr(ctx, err)
}
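A minimal sketch of the Do entry point above, assuming an existing client; the key name is illustrative:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func getViaDo(cli *clientv3.Client, key string) error {
	// Do dispatches a single pre-built Op; useful for ops constructed elsewhere
	resp, err := cli.Do(context.Background(), clientv3.OpGet(key))
	if err != nil {
		return err // already mapped through toErr
	}
	for _, kv := range resp.Get().Kvs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value)
	}
	return nil
}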

View File

@@ -20,8 +20,9 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type (
@@ -29,7 +30,7 @@ type (
LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
@@ -37,14 +38,14 @@ type LeaseGrantResponse struct {
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader
ID LeaseID `json:"id"`
@@ -59,6 +60,12 @@ type LeaseTimeToLiveResponse struct {
Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
ID LeaseID `json:"id"`
// TODO: TTL int64
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
@@ -67,6 +74,9 @@ const (
leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
// retryConnWait is how long to wait before retrying a request after an error
retryConnWait = 500 * time.Millisecond
)
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
@@ -97,7 +107,7 @@ type Lease interface {
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
// KeepAliveOnce renews the lease once. In most of the cases, KeepAlive
// should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
@@ -126,6 +136,9 @@ type lessor struct {
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
firstKeepAliveOnce sync.Once
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
@@ -141,85 +154,62 @@ type keepAlive struct {
}
func NewLease(c *Client) Lease {
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
}
func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: RetryLeaseClient(c),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
remote: remote,
firstKeepAliveTimeout: keepAliveTimeout,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop()
go l.deadlineLoop()
reqLeaderCtx := WithRequireLeader(context.Background())
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(cctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(cctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r)
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
@@ -254,19 +244,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
l.mu.Unlock()
go l.keepAliveCtxCloser(id, ctx, ka.donec)
l.firstKeepAliveOnce.Do(func() {
go l.recvKeepAliveLoop()
go l.deadlineLoop()
})
return ch, nil
}
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
resp, err := l.keepAliveOnce(cctx, id)
resp, err := l.keepAliveOnce(ctx, id)
if err == nil {
if resp.TTL == 0 {
if resp.TTL <= 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err
@@ -279,6 +269,8 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
func (l *lessor) Close() error {
l.stopCancel()
// close for synchronous teardown if stream goroutines never launched
l.firstKeepAliveOnce.Do(func() { close(l.donec) })
<-l.donec
return nil
}
@@ -315,11 +307,50 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
}
}
// closeRequireLeader scans keepAlives for contexts that require a leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
l.mu.Lock()
defer l.mu.Unlock()
for _, ka := range l.keepAlives {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}
ks := md[rpctypes.MetadataRequireLeaderKey]
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
continue
}
close(ka.chs[i])
ka.chs[i] = nil
reqIdxs++
}
if reqIdxs == 0 {
continue
}
// remove all channels that required a leader from keepalive
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
newCtxs := make([]context.Context, len(newChs))
newIdx := 0
for i := range ka.chs {
if ka.chs[i] == nil {
continue
}
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
newIdx++
}
ka.chs, ka.ctxs = newChs, newCtxs
}
}
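// keepAliveRequireLeader is an illustrative sketch of the caller-side
// pairing for closeRequireLeader: keepalives registered under a
// require-leader context are exactly the ones whose channels are closed
// above when the member reports "no leader".
func keepAliveRequireLeader(c *Client, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	return c.KeepAlive(WithRequireLeader(context.Background()), id)
}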
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(cctx)
if err != nil {
return nil, toErr(ctx, err)
}
@@ -348,32 +379,50 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.Close()
ka.close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
}()
stream, serr := l.resetRecv()
for serr == nil {
resp, err := stream.Recv()
for {
stream, err := l.resetRecv()
if err != nil {
if isHaltErr(l.stopCtx, err) {
if canceledByCaller(l.stopCtx, err) {
return err
}
stream, serr = l.resetRecv()
continue
} else {
for {
resp, err := stream.Recv()
if err != nil {
if canceledByCaller(l.stopCtx, err) {
return err
}
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
l.closeRequireLeader()
}
break
}
l.recvKeepAlive(resp)
}
}
select {
case <-time.After(retryConnWait):
continue
case <-l.stopCtx.Done():
return l.stopCtx.Err()
}
l.recvKeepAlive(resp)
}
return serr
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err = toErr(sctx, err); err != nil {
stream, err := l.remote.LeaseKeepAlive(sctx)
if err != nil {
cancel()
return nil, err
}
@@ -381,7 +430,6 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
@@ -411,7 +459,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
ka.close()
return
}
@@ -441,7 +489,7 @@ func (l *lessor) deadlineLoop() {
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
ka.close()
delete(l.keepAlives, id)
}
}
@@ -449,19 +497,9 @@ func (l *lessor) deadlineLoop() {
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
select {
case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
var tosend []LeaseID
now := time.Now()
@@ -480,29 +518,22 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
return
}
}
select {
case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
}
}
func (ka *keepAlive) Close() {
func (ka *keepAlive) close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
}
}
// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done
// should be closed when the work is finished. When done fires, cancelWhenStop will release
// its internal resource.
func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} {
done := make(chan struct{}, 1)
go func() {
select {
case <-stopc:
case <-done:
}
cancel()
}()
return done
}
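For orientation, a minimal sketch of driving the lessor API above, assuming an already-constructed *clientv3.Client (the parameter cli); the first KeepAlive call is what now starts the stream goroutines, and Close tears them down even if no stream was ever launched.
package main
import (
	"fmt"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
func leaseLifecycle(cli *clientv3.Client) error {
	// Grant a lease with a 10-second TTL.
	gresp, err := cli.Grant(context.Background(), 10)
	if err != nil {
		return err
	}
	// The first KeepAlive call lazily starts recvKeepAliveLoop/deadlineLoop.
	ch, err := cli.KeepAlive(context.Background(), gresp.ID)
	if err != nil {
		return err
	}
	for ka := range ch { // closed on lease expiry or client close
		fmt.Println("lease refreshed, TTL:", ka.TTL)
	}
	return nil
}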

View File

@@ -16,36 +16,35 @@ package clientv3
import (
"io/ioutil"
"log"
"sync"
"google.golang.org/grpc/grpclog"
)
// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger
// It implements grpclog.LoggerV2 interface.
type Logger grpclog.LoggerV2
var (
logger settableLogger
)
type settableLogger struct {
l grpclog.Logger
l grpclog.LoggerV2
mu sync.RWMutex
}
func init() {
// disable client side logs by default
logger.mu.Lock()
logger.l = log.New(ioutil.Discard, "", 0)
logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
grpclog.SetLogger(&logger)
grpclog.SetLoggerV2(&logger)
logger.mu.Unlock()
}
@@ -62,6 +61,7 @@ func GetLogger() Logger {
func (s *settableLogger) set(l Logger) {
s.mu.Lock()
logger.l = l
grpclog.SetLoggerV2(&logger)
s.mu.Unlock()
}
@@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger {
return l
}
// implement the grpclog.Logger interface
// implement the grpclog.LoggerV2 interface
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
func (s *settableLogger) Warningf(format string, args ...interface{}) {
s.get().Warningf(format, args...)
}
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
func (s *settableLogger) Errorf(format string, args ...interface{}) {
s.get().Errorf(format, args...)
}
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
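A short sketch of plugging a real logger into the new LoggerV2 interface; writing to os.Stderr is an arbitrary choice.
package main
import (
	"os"
	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/grpclog"
)
func enableClientLogs() {
	// Replace the default discard logger; set also re-registers the wrapper
	// with grpclog, so grpc's own logs flow through the same lock.
	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}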

View File

@@ -18,8 +18,8 @@ import (
"io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@@ -36,7 +36,7 @@ type Maintenance interface {
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments storage backend of the etcd member with given endpoint.
// Defragment releases wasted space from internal fragmentation on a given etcd member.
// Defragment is only needed after deleting a large number of keys, when the
// freed resources should be reclaimed.
// Defragment is an expensive operation. User should avoid defragmenting multiple members
@@ -48,17 +48,36 @@ type Maintenance interface {
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
// Snapshot provides a reader for a point-in-time snapshot of etcd.
Snapshot(ctx context.Context) (io.ReadCloser, error)
}
type maintenance struct {
c *Client
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
}
func NewMaintenance(c *Client) Maintenance {
return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
return &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.dial(endpoint)
if err != nil {
return nil, nil, err
}
cancel := func() { conn.Close() }
return RetryMaintenanceClient(c, conn), cancel, nil
},
remote: RetryMaintenanceClient(c, c.conn),
}
}
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
return &maintenance{
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
remote: remote,
}
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
@@ -67,15 +86,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
resp, err := m.remote.Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
@@ -101,7 +116,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
resp, err := m.remote.Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
@@ -109,13 +124,12 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint)
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
@@ -123,13 +137,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
}
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint)
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
@@ -137,7 +150,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
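Since Defragment and Status now dial each endpoint individually, a hedged sketch of defragmenting members one at a time (the client and endpoint list are assumed inputs):
package main
import (
	"log"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
// defragAll defragments each member in turn; Defragment is expensive, so
// members are processed sequentially rather than concurrently.
func defragAll(cli *clientv3.Client, endpoints []string) {
	for _, ep := range endpoints {
		if _, err := cli.Defragment(context.Background(), ep); err != nil {
			log.Printf("defragment %s: %v", ep, err)
		}
	}
}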

34
vendor/github.com/coreos/etcd/clientv3/namespace/BUILD generated vendored Normal file
View File

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"kv.go",
"lease.go",
"util.go",
"watch.go",
],
importpath = "github.com/coreos/etcd/clientv3/namespace",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,43 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package namespace is a clientv3 wrapper that translates all keys to begin
// with a given prefix.
//
// First, create a client:
//
// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
// if err != nil {
// // handle error!
// }
//
// Next, override the client interfaces:
//
// unprefixedKV := cli.KV
// cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
//
// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
//
// cli.Put(context.TODO(), "abc", "123")
// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 123
// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
// resp, _ = cli.Get(context.TODO(), "abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 456
//
package namespace

189
vendor/github.com/coreos/etcd/clientv3/namespace/kv.go generated vendored Normal file
View File

@@ -0,0 +1,189 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
type kvPrefix struct {
clientv3.KV
pfx string
}
// NewKV wraps a KV instance so that all requests
// are prefixed with a given string.
func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
return &kvPrefix{kv, prefix}
}
func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
if len(key) == 0 {
return nil, rpctypes.ErrEmptyKey
}
op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
r, err := kv.KV.Do(ctx, op)
if err != nil {
return nil, err
}
put := r.Put()
kv.unprefixPutResponse(put)
return put, nil
}
func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
if len(key) == 0 {
return nil, rpctypes.ErrEmptyKey
}
r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
if err != nil {
return nil, err
}
get := r.Get()
kv.unprefixGetResponse(get)
return get, nil
}
func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
if len(key) == 0 {
return nil, rpctypes.ErrEmptyKey
}
r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
if err != nil {
return nil, err
}
del := r.Del()
kv.unprefixDeleteResponse(del)
return del, nil
}
func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
if len(op.KeyBytes()) == 0 {
return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
}
r, err := kv.KV.Do(ctx, kv.prefixOp(op))
if err != nil {
return r, err
}
switch {
case r.Get() != nil:
kv.unprefixGetResponse(r.Get())
case r.Put() != nil:
kv.unprefixPutResponse(r.Put())
case r.Del() != nil:
kv.unprefixDeleteResponse(r.Del())
}
return r, nil
}
type txnPrefix struct {
clientv3.Txn
kv *kvPrefix
}
func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
return &txnPrefix{kv.KV.Txn(ctx), kv}
}
func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
newCmps := make([]clientv3.Cmp, len(cs))
for i := range cs {
newCmps[i] = cs[i]
pfxKey, _ := txn.kv.prefixInterval(cs[i].KeyBytes(), nil)
newCmps[i].WithKeyBytes(pfxKey)
}
txn.Txn = txn.Txn.If(newCmps...)
return txn
}
func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
newOps := make([]clientv3.Op, len(ops))
for i := range ops {
newOps[i] = txn.kv.prefixOp(ops[i])
}
txn.Txn = txn.Txn.Then(newOps...)
return txn
}
func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
newOps := make([]clientv3.Op, len(ops))
for i := range ops {
newOps[i] = txn.kv.prefixOp(ops[i])
}
txn.Txn = txn.Txn.Else(newOps...)
return txn
}
func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
resp, err := txn.Txn.Commit()
if err != nil {
return nil, err
}
txn.kv.unprefixTxnResponse(resp)
return resp, nil
}
func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
op.WithKeyBytes(begin)
op.WithRangeBytes(end)
return op
}
func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
for i := range resp.Kvs {
resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
}
}
func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
if resp.PrevKv != nil {
resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
}
}
func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
for i := range resp.PrevKvs {
resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
}
}
func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
for _, r := range resp.Responses {
switch tv := r.Response.(type) {
case *pb.ResponseOp_ResponseRange:
if tv.ResponseRange != nil {
kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
}
case *pb.ResponseOp_ResponsePut:
if tv.ResponsePut != nil {
kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
}
case *pb.ResponseOp_ResponseDeleteRange:
if tv.ResponseDeleteRange != nil {
kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
}
default:
}
}
}
func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
return prefixInterval(p.pfx, key, end)
}
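A small usage sketch of the prefixed Txn path above; the prefix "my-prefix/" follows the package example, while the key "job" is hypothetical. Compares and ops are prefixed on the way out, responses unprefixed on the way back.
package main
import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
	"golang.org/x/net/context"
)
func nsTxn(cli *clientv3.Client) error {
	kv := namespace.NewKV(cli.KV, "my-prefix/")
	// The compare and put below actually operate on "my-prefix/job".
	_, err := kv.Txn(context.Background()).
		If(clientv3.Compare(clientv3.CreateRevision("job"), "=", 0)).
		Then(clientv3.OpPut("job", "queued")).
		Commit()
	return err
}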

View File

@@ -0,0 +1,58 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"bytes"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
type leasePrefix struct {
clientv3.Lease
pfx []byte
}
// NewLease wraps a Lease interface to filter for only keys with a prefix
// and remove that prefix when fetching attached keys through TimeToLive.
func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
return &leasePrefix{l, []byte(prefix)}
}
func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
resp, err := l.Lease.TimeToLive(ctx, id, opts...)
if err != nil {
return nil, err
}
if len(resp.Keys) > 0 {
var outKeys [][]byte
for i := range resp.Keys {
if len(resp.Keys[i]) < len(l.pfx) {
// too short
continue
}
if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
// doesn't match prefix
continue
}
// strip prefix
outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
}
resp.Keys = outKeys
}
return resp, nil
}

View File

@@ -0,0 +1,42 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
pfxKey = make([]byte, len(pfx)+len(key))
copy(pfxKey[copy(pfxKey, pfx):], key)
if len(end) == 1 && end[0] == 0 {
// the edge of the keyspace
pfxEnd = make([]byte, len(pfx))
copy(pfxEnd, pfx)
ok := false
for i := len(pfxEnd) - 1; i >= 0; i-- {
if pfxEnd[i]++; pfxEnd[i] != 0 {
ok = true
break
}
}
if !ok {
// 0xff..ff => 0x00
pfxEnd = []byte{0}
}
} else if len(end) >= 1 {
pfxEnd = make([]byte, len(pfx)+len(end))
copy(pfxEnd[copy(pfxEnd, pfx):], end)
}
return pfxKey, pfxEnd
}
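Two worked cases for prefixInterval, as an in-package sketch; the keyspace-edge branch increments the prefix's last byte, so "p/" becomes "p0" ('/'+1 is '0').
package namespace
import "fmt"
func ExamplePrefixInterval() {
	// Ordinary range: both ends simply gain the prefix.
	k, e := prefixInterval("p/", []byte("a"), []byte("b"))
	fmt.Printf("%q %q\n", k, e) // "p/a" "p/b"
	// end == []byte{0} means "rest of the keyspace", so the prefixed end
	// is the prefix with its last byte incremented.
	k, e = prefixInterval("p/", []byte{0}, []byte{0})
	fmt.Printf("%q %q\n", k, e) // "p/\x00" "p0"
}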

View File

@@ -0,0 +1,84 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"sync"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
type watcherPrefix struct {
clientv3.Watcher
pfx string
wg sync.WaitGroup
stopc chan struct{}
stopOnce sync.Once
}
// NewWatcher wraps a Watcher instance so that all Watch requests
// are prefixed with a given string and all Watch responses have
// the prefix removed.
func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
}
func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
// since OpOption is opaque, determine range for prefixing through an OpGet
op := clientv3.OpGet(key, opts...)
end := op.RangeBytes()
pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
if pfxEnd != nil {
opts = append(opts, clientv3.WithRange(string(pfxEnd)))
}
wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
// translate watch events from prefixed to unprefixed
pfxWch := make(chan clientv3.WatchResponse)
w.wg.Add(1)
go func() {
defer func() {
close(pfxWch)
w.wg.Done()
}()
for wr := range wch {
for i := range wr.Events {
wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
if wr.Events[i].PrevKv != nil {
wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
}
}
select {
case pfxWch <- wr:
case <-ctx.Done():
return
case <-w.stopc:
return
}
}
}()
return pfxWch
}
func (w *watcherPrefix) Close() error {
err := w.Watcher.Close()
w.stopOnce.Do(func() { close(w.stopc) })
w.wg.Wait()
return err
}

32
vendor/github.com/coreos/etcd/clientv3/naming/BUILD generated vendored Normal file
View File

@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"grpc.go",
],
importpath = "github.com/coreos/etcd/clientv3/naming",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/naming:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

56
vendor/github.com/coreos/etcd/clientv3/naming/doc.go generated vendored Normal file
View File

@@ -0,0 +1,56 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services.
//
// To use, first import the packages:
//
// import (
// "github.com/coreos/etcd/clientv3"
// etcdnaming "github.com/coreos/etcd/clientv3/naming"
//
// "google.golang.org/grpc"
// "google.golang.org/grpc/naming"
// )
//
// First, register new endpoint addresses for a service:
//
// func etcdAdd(c *clientv3.Client, service, addr string) error {
// r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
// }
//
// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
//
// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
// r := &etcdnaming.GRPCResolver{Client: c}
// b := grpc.RoundRobin(r)
// return grpc.Dial(service, grpc.WithBalancer(b))
// }
//
// Optionally, force delete an endpoint:
//
// func etcdDelete(c *clientv3.Client, service, addr string) error {
// r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Delete, Addr: addr})
// }
//
// Or register an expiring endpoint with a lease:
//
// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
// r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
// }
//
package naming

132
vendor/github.com/coreos/etcd/clientv3/naming/grpc.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package naming
import (
"encoding/json"
"fmt"
etcd "github.com/coreos/etcd/clientv3"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/status"
"golang.org/x/net/context"
)
var ErrWatcherClosed = fmt.Errorf("naming: watch closed")
// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
type GRPCResolver struct {
// Client is an initialized etcd client.
Client *etcd.Client
}
func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
switch nm.Op {
case naming.Add:
var v []byte
if v, err = json.Marshal(nm); err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
case naming.Delete:
_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
default:
return status.Error(codes.InvalidArgument, "naming: bad naming op")
}
return err
}
func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel}
return w, nil
}
type gRPCWatcher struct {
c *etcd.Client
target string
ctx context.Context
cancel context.CancelFunc
wch etcd.WatchChan
err error
}
// Next gets the next set of updates from the etcd resolver.
// Calls to Next should be serialized; concurrent calls are not safe since
// there is no way to reconcile the update ordering.
func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
if gw.wch == nil {
// first Next() returns all addresses
return gw.firstNext()
}
if gw.err != nil {
return nil, gw.err
}
// process new events on target/*
wr, ok := <-gw.wch
if !ok {
gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
return nil, gw.err
}
if gw.err = wr.Err(); gw.err != nil {
return nil, gw.err
}
updates := make([]*naming.Update, 0, len(wr.Events))
for _, e := range wr.Events {
var jupdate naming.Update
var err error
switch e.Type {
case etcd.EventTypePut:
err = json.Unmarshal(e.Kv.Value, &jupdate)
jupdate.Op = naming.Add
case etcd.EventTypeDelete:
err = json.Unmarshal(e.PrevKv.Value, &jupdate)
jupdate.Op = naming.Delete
}
if err == nil {
updates = append(updates, &jupdate)
}
}
return updates, nil
}
func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {
// Use serialized request so resolution still works if the target etcd
// server is partitioned away from the quorum.
resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())
if gw.err = err; err != nil {
return nil, err
}
updates := make([]*naming.Update, 0, len(resp.Kvs))
for _, kv := range resp.Kvs {
var jupdate naming.Update
if err := json.Unmarshal(kv.Value, &jupdate); err != nil {
continue
}
updates = append(updates, &jupdate)
}
opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}
gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)
return updates, nil
}
func (gw *gRPCWatcher) Close() { gw.cancel() }
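A hedged consumption sketch for the watcher above (the helper name watchService is an assumption): per Next's contract, calls stay serialized in one loop; the first call lists all current addresses and later calls return deltas from the etcd watch.
package naming
import "fmt"
func watchService(r *GRPCResolver, service string) error {
	w, err := r.Resolve(service)
	if err != nil {
		return err
	}
	defer w.Close()
	for {
		updates, err := w.Next()
		if err != nil {
			return err
		}
		for _, u := range updates {
			fmt.Println(u.Op, u.Addr)
		}
	}
}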

View File

@@ -23,6 +23,7 @@ const (
tRange opType = iota + 1
tPut
tDeleteRange
tTxn
)
var (
@@ -52,6 +53,10 @@ type Op struct {
// for watch, put, delete
prevKV bool
// for put
ignoreValue bool
ignoreLease bool
// progressNotify is for progress updates.
progressNotify bool
// createdNotify is for created event
@@ -63,8 +68,69 @@ type Op struct {
// for put
val []byte
leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
}
// accessors / mutators
func (op Op) IsTxn() bool { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }
// WithKeyBytes sets the byte slice for the Op's key.
func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }
// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }
// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable }
// IsKeysOnly returns whether keysOnly is set.
func (op Op) IsKeysOnly() bool { return op.keysOnly }
// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly }
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
// MaxModRev returns the operation's maximum modify revision.
func (op Op) MaxModRev() int64 { return op.maxModRev }
// MinCreateRev returns the operation's minimum create revision.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
// MaxCreateRev returns the operation's maximum create revision.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
// ValueBytes returns the byte slice holding the Op's value, if any.
func (op Op) ValueBytes() []byte { return op.val }
// WithValueBytes sets the byte slice for the Op's value.
func (op *Op) WithValueBytes(v []byte) { op.val = v }
func (op Op) toRangeRequest() *pb.RangeRequest {
if op.t != tRange {
panic("op.t != tRange")
@@ -89,12 +155,28 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
return r
}
func (op Op) toTxnRequest() *pb.TxnRequest {
thenOps := make([]*pb.RequestOp, len(op.thenOps))
for i, tOp := range op.thenOps {
thenOps[i] = tOp.toRequestOp()
}
elseOps := make([]*pb.RequestOp, len(op.elseOps))
for i, eOp := range op.elseOps {
elseOps[i] = eOp.toRequestOp()
}
cmps := make([]*pb.Compare, len(op.cmps))
for i := range op.cmps {
cmps[i] = (*pb.Compare)(&op.cmps[i])
}
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
@@ -105,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp {
}
func (op Op) isWrite() bool {
if op.t == tTxn {
for _, tOp := range op.thenOps {
if tOp.isWrite() {
return true
}
}
for _, tOp := range op.elseOps {
if tOp.isWrite() {
return true
}
}
return false
}
return op.t != tRange
}
@@ -170,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
return ret
}
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
@@ -207,6 +306,7 @@ func WithLease(leaseID LeaseID) OpOption {
}
// WithLimit limits the number of results to return from a 'Get' request.
// If WithLimit is given a 0 limit, it is treated as no limit.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for a 'Get' request.
@@ -222,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// client should ignore SortOrder if the target is SortByKey.
// Since by default the server returns results sorted by keys
// in lexicographically ascending order, the client should ignore
// SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}
@@ -257,6 +357,10 @@ func getPrefix(key []byte) []byte {
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
if len(op.key) == 0 {
op.key, op.end = []byte{0}, []byte{0}
return
}
op.end = getPrefix(op.key)
}
}
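// getAll is an illustrative sketch (the KV handle is an assumed input):
// with the empty-key branch above, WithPrefix expands the request to the
// interval [{0}, {0}), i.e. the entire keyspace.
func getAll(ctx context.Context, kv KV) (*GetResponse, error) {
	return kv.Get(ctx, "", WithPrefix())
}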
@@ -360,6 +464,24 @@ func WithPrevKV() OpOption {
}
}
// WithIgnoreValue updates the key using its current value.
// This option cannot be combined with non-empty values.
// Returns an error if the key does not exist.
func WithIgnoreValue() OpOption {
return func(op *Op) {
op.ignoreValue = true
}
}
// WithIgnoreLease updates the key using its current lease.
// This option cannot be combined with WithLease.
// Returns an error if the key does not exist.
func WithIgnoreLease() OpOption {
return func(op *Op) {
op.ignoreLease = true
}
}
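// rebindLease is an illustrative sketch (key "config" is hypothetical): it
// moves an existing key onto lease id while keeping the stored value. With
// WithIgnoreValue the value argument must stay empty; symmetrically, a
// value-only update with WithIgnoreLease would keep the key's current lease.
func rebindLease(ctx context.Context, kv KV, id LeaseID) error {
	_, err := kv.Put(ctx, "config", "", WithLease(id), WithIgnoreValue())
	return err
}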
// LeaseOp represents an Operation that lease can execute.
type LeaseOp struct {
id LeaseID
@@ -377,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
}
}
// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true }
}

30
vendor/github.com/coreos/etcd/clientv3/ready_wait.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import "golang.org/x/net/context"
// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}

View File

@@ -17,135 +17,183 @@ package clientv3
import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error
type retryRPCFunc func(context.Context, rpcFunc) error
type retryStopErrFunc func(error) bool
func (c *Client) newRetryWrapper() retryRpcFunc {
func isRepeatableStopError(err error) bool {
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
return true
}
// only retry if unavailable
ev, _ := status.FromError(err)
return ev.Code() != codes.Unavailable
}
func isNonRepeatableStopError(err error) bool {
ev, _ := status.FromError(err)
if ev.Code() != codes.Unavailable {
return true
}
desc := rpctypes.ErrorDesc(err)
return desc != "there is no address available" && desc != "there is no connection available"
}
func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
for {
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
return err
}
pinned := c.balancer.pinned()
err := f(rpcCtx)
if err == nil {
return nil
}
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if _, ok := eErr.(rpctypes.EtcdError); ok {
return err
if logger.V(4) {
logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
}
// only retry if unavailable
if grpc.Code(err) != codes.Unavailable {
return err
if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
// mark this before endpoint switch is triggered
c.balancer.hostPortError(pinned, err)
c.balancer.next()
if logger.V(4) {
logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
}
}
select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-c.ctx.Done():
return c.ctx.Err()
if isStop(err) {
return err
}
}
}
}
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
func (c *Client) newAuthRetryWrapper() retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
for {
pinned := c.balancer.pinned()
err := f(rpcCtx)
if err == nil {
return nil
}
if logger.V(4) {
logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
}
// always stop retry on etcd errors other than invalid auth token
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(rpcCtx)
if gterr != nil {
if logger.V(4) {
logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
}
return err // return the original error for simplicity
}
continue
}
return err
}
}
}
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
conn := pb.NewKVClient(c.conn)
retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry}
retryAuthWrapper := c.newAuthRetryWrapper()
return &retryKVClient{
&nonRepeatableKVClient{retryBasic, retryAuthWrapper},
retryAuthWrapper}
}
type retryKVClient struct {
*retryWriteKVClient
*nonRepeatableKVClient
repeatableRetry retryRPCFunc
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
err = rkv.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Range(rctx, in, opts...)
return err
})
return resp, err
}
type retryWriteKVClient struct {
pb.KVClient
retryf retryRpcFunc
type nonRepeatableKVClient struct {
kc pb.KVClient
nonRepeatableRetry retryRPCFunc
}
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Put(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
// TODO: repeatableRetry if read-only txn
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Txn(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Compact(rctx, in, opts...)
return err
})
return resp, err
}
type retryLeaseClient struct {
pb.LeaseClient
retryf retryRpcFunc
lc pb.LeaseClient
repeatableRetry retryRPCFunc
}
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
return &retryLeaseClient{retry, c.retryAuthWrapper}
retry := &retryLeaseClient{
pb.NewLeaseClient(c.conn),
c.newRetryWrapper(isRepeatableStopError),
}
return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
return err
})
return resp, err
@@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
return err
})
return stream, err
}
type retryClusterClient struct {
pb.ClusterClient
retryf retryRpcFunc
*nonRepeatableClusterClient
repeatableRetry retryRPCFunc
}
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
cc := pb.NewClusterClient(c.conn)
return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry}
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
err = rcc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberList(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
type nonRepeatableClusterClient struct {
cc pb.ClusterClient
nonRepeatableRetry retryRPCFunc
}
func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
return err
})
return resp, err
}
// RetryMaintenanceClient implements a MaintenanceClient.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
mc := pb.NewMaintenanceClient(conn)
return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
}
type retryMaintenanceClient struct {
*nonRepeatableMaintenanceClient
repeatableRetry retryRPCFunc
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Alarm(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Status(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Hash(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
return err
})
return stream, err
}
type nonRepeatableMaintenanceClient struct {
mc pb.MaintenanceClient
nonRepeatableRetry retryRPCFunc
}
func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Defragment(rctx, in, opts...)
return err
})
return resp, err
}
type retryAuthClient struct {
pb.AuthClient
retryf retryRpcFunc
*nonRepeatableAuthClient
repeatableRetry retryRPCFunc
}
// RetryAuthClient implements an AuthClient that uses the client's FailFast retry policy.
// RetryAuthClient implements an AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
ac := pb.NewAuthClient(c.conn)
return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserList(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGet(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGet(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleList(rctx, in, opts...)
return err
})
return resp, err
}
type nonRepeatableAuthClient struct {
ac pb.AuthClient
nonRepeatableRetry retryRPCFunc
}
func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.AuthEnable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.AuthDisable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.Authenticate(rctx, in, opts...)
return err
})
return resp, err
}
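The split above routes read-only RPCs (UserList, UserGet, RoleGet, RoleList) through repeatableRetry, while state-changing RPCs only pass through nonRepeatableRetry. A minimal usage sketch, assuming an existing *clientv3.Client named cli (the variable names are illustrative, not part of this diff):

    ac := clientv3.RetryAuthClient(cli) // wraps pb.AuthClient with the retry policies above
    resp, err := ac.UserList(context.TODO(), &pb.AuthUserListRequest{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp.Users)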

View File

@@ -18,13 +18,13 @@ import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Txn is the interface that wraps mini-transactions.
//
// Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
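As a concrete illustration of the interface above, a minimal transaction might look like this sketch, assuming an established clientv3 client cli (the key names are hypothetical):

    // Put "k1" only when the guard holds; otherwise read it back.
    resp, err := cli.Txn(context.TODO()).
        If(clientv3.Compare(clientv3.Value("k1"), ">", "v1")).
        Then(clientv3.OpPut("k1", "v2")).
        Else(clientv3.OpGet("k1")).
        Commit()
    if err != nil {
        log.Fatal(err)
    }
    _ = resp.Succeeded // true when the If comparisons all held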
@@ -49,8 +49,6 @@ type Txn interface {
// Commit tries to commit the transaction.
Commit() (*TxnResponse, error)
// TODO: add a Do for shortcut the txn without any condition?
}
type txn struct {
@@ -137,30 +135,14 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
var resp *pb.TxnResponse
var err error
resp, err = txn.kv.remote.Txn(txn.ctx, r)
if err != nil {
return nil, toErr(txn.ctx, err)
}
return (*TxnResponse)(resp), nil
}

View File

@@ -22,8 +22,12 @@ import (
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
@@ -39,10 +43,9 @@ type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
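A short sketch of consuming a watch and detecting compaction, assuming a clientv3 client cli and the rpctypes package for the sentinel error (names like oldRev are illustrative):

    wch := cli.Watch(context.Background(), "foo", clientv3.WithRev(oldRev))
    for wresp := range wch {
        if err := wresp.Err(); err != nil {
            if err == rpctypes.ErrCompacted {
                // oldRev was compacted away; restart from a current revision.
            }
            break // the channel closes after a canceled watch
        }
        for _, ev := range wresp.Events {
            fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
        }
    }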
@@ -65,6 +68,9 @@ type WatchResponse struct {
Created bool
closeErr error
// cancelReason is a reason of canceling watch
cancelReason string
}
// IsCreate returns true if the event tells that the key is newly created.
@@ -85,6 +91,9 @@ func (wr *WatchResponse) Err() error {
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
case wr.Canceled:
if len(wr.cancelReason) != 0 {
return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
}
return v3rpc.ErrFutureRev
}
return nil
@@ -128,7 +137,7 @@ type watchGrpcStream struct {
respc chan *pb.WatchResponse
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconnect logic
errc chan error
// closingc gets the watcherStream of closing watchers
closingc chan *watcherStream
@@ -207,16 +216,15 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
owner: w,
remote: w.remote,
ctx: ctx,
ctxKey: streamKeyFromCtx(inctx),
cancel: cancel,
substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
}
go wgs.run()
return wgs
@@ -247,7 +255,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
}
ok := false
ctxKey := streamKeyFromCtx(ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
@@ -310,14 +318,14 @@ func (w *watcher) Close() (err error) {
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.close(); werr != nil {
err = werr
}
}
return err
}
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
select {
@@ -428,7 +436,7 @@ func (w *watchGrpcStream) run() {
initReq: *wreq,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
@@ -480,7 +488,7 @@ func (w *watchGrpcStream) run() {
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
@@ -520,10 +528,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream {
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
@@ -534,6 +538,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
CompactRevision: pbresp.CompactRevision,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
cancelReason: pbresp.CancelReason,
}
ws, ok := w.substreams[pbresp.WatchId]
if !ok {
return false
}
select {
case ws.recvc <- wr:
@@ -725,7 +734,11 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
ws.closing = true
close(ws.outc)
ws.outc = nil
w.wg.Add(1)
go func() {
defer w.wg.Done()
w.closingc <- ws
}()
case <-stopc:
}
}(w.resuming[i])
@@ -737,7 +750,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
return donec
}
// joinSubstreams waits for all substream goroutines to complete.
func (w *watchGrpcStream) joinSubstreams() {
for _, ws := range w.substreams {
<-ws.donec
@@ -749,7 +762,9 @@ func (w *watchGrpcStream) joinSubstreams() {
}
}
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
select {
@@ -770,7 +785,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
return ws, nil
}
// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
@@ -783,3 +798,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
}
return ""
}
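One consequence of keying streams by outgoing metadata, sketched below with illustrative values: contexts carrying identical metadata produce the same key, so their watches are multiplexed onto one watchGrpcStream.

    md := metadata.Pairs("token", "abc")
    ctx1 := metadata.NewOutgoingContext(context.Background(), md)
    ctx2 := metadata.NewOutgoingContext(context.Background(), md)
    // streamKeyFromCtx(ctx1) == streamKeyFromCtx(ctx2), so watches created
    // with ctx1 and ctx2 share a single gRPC watch stream.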

View File

@@ -30,7 +30,8 @@ var (
)
const (
checkCompactionInterval = 5 * time.Minute
executeCompactionInterval = time.Hour
)
type Compactable interface {
@@ -41,6 +42,8 @@ type RevGetter interface {
Rev() int64
}
// Periodic compacts the log by purging revisions older than
// the configured retention time. Compaction happens hourly.
type Periodic struct {
clock clockwork.Clock
periodInHour int
@@ -85,11 +88,12 @@ func (t *Periodic) Run() {
continue
}
}
if clock.Now().Sub(last) < executeCompactionInterval {
continue
}
rev, remaining := t.getRev(t.periodInHour)
if rev < 0 {
continue
}
@@ -97,7 +101,7 @@ func (t *Periodic) Run() {
plog.Noticef("Starting auto-compaction at revision %d", rev)
_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
if err == nil || err == mvcc.ErrCompacted {
t.revs = remaining
last = clock.Now()
plog.Noticef("Finished auto-compaction at revision %d", rev)
} else {
@@ -124,10 +128,10 @@ func (t *Periodic) Resume() {
t.paused = false
}
func (t *Periodic) getRev(h int) (int64, []int64) {
i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval)
if i < 0 {
return -1, t.revs
}
return t.revs[i], t.revs[i+1:]
}
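The index arithmetic in getRev follows from the sampling rate: a revision is recorded every checkCompactionInterval (5 minutes), so h hours of history spans the last h*12 samples. A small sketch of the relationship (variable names are illustrative):

    samplesPerHour := int(time.Hour / checkCompactionInterval) // 12
    i := len(revs) - h*samplesPerHour        // oldest sample still inside the window
    target, remaining := revs[i], revs[i+1:] // compact up to target, keep the rest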

View File

@@ -2,10 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["discovery.go"],
importpath = "github.com/coreos/etcd/discovery",
visibility = ["//visibility:public"],
deps = [

View File

@@ -1,104 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package discovery
import (
"fmt"
"net"
"net/url"
"strings"
"github.com/coreos/etcd/pkg/types"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
resolveTCPAddr = net.ResolveTCPAddr
)
// SRVGetCluster gets the cluster information via DNS discovery.
// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap)
// Also doesn't do any lookups for the token (though it could)
// Also sees each entry as a separate instance.
func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) {
tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
return "", "", err
}
tcp2ap[tcpAddr.String()] = url
}
stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
}
for _, srv := range addrs {
port := fmt.Sprintf("%d", srv.Port)
host := net.JoinHostPort(srv.Target, port)
tcpAddr, err := resolveTCPAddr("tcp", host)
if err != nil {
plog.Warningf("couldn't resolve host %s during SRV discovery", host)
continue
}
n := ""
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName++
}
// SRV records have a trailing dot but URL shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost)
if ok && url.Scheme != scheme {
plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
}
}
return nil
}
failCount := 0
err := updateNodeMap("etcd-server-ssl", "https")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err)
failCount++
}
err = updateNodeMap("etcd-server", "http")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err)
failCount++
}
if failCount == 2 {
plog.Warningf(srvErr[0])
plog.Warningf(srvErr[1])
plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records")
return "", "", err
}
return strings.Join(stringParts, ","), defaultToken, nil
}

vendor/github.com/coreos/etcd/embed/BUILD generated vendored Normal file
View File

@@ -0,0 +1,60 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"config.go",
"doc.go",
"etcd.go",
"serve.go",
"util.go",
],
importpath = "github.com/coreos/etcd/embed",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cockroachdb/cmux:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/cors:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/debugutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/srv:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
"//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
"//vendor/github.com/coreos/etcd/wal:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/golang.org/x/net/trace:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/credentials:go_default_library",
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/embed/config.go generated vendored Normal file
View File

@@ -0,0 +1,464 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package embed
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/pkg/cors"
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/srv"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/ghodss/yaml"
"google.golang.org/grpc"
)
const (
ClusterStateFlagNew = "new"
ClusterStateFlagExisting = "existing"
DefaultName = "default"
DefaultMaxSnapshots = 5
DefaultMaxWALs = 5
DefaultMaxRequestBytes = 1.5 * 1024 * 1024
DefaultGRPCKeepAliveMinTime = 5 * time.Second
DefaultGRPCKeepAliveInterval = 2 * time.Hour
DefaultGRPCKeepAliveTimeout = 20 * time.Second
DefaultListenPeerURLs = "http://localhost:2380"
DefaultListenClientURLs = "http://localhost:2379"
// maxElectionMs specifies the maximum value of election timeout.
// More details are listed in ../Documentation/tuning.md#time-parameters.
maxElectionMs = 50000
)
var (
ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
"Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
DefaultAdvertiseClientURLs = "http://localhost:2379"
defaultHostname string
defaultHostStatus error
)
func init() {
defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
}
// Config holds the arguments for configuring an etcd server.
type Config struct {
// member
CorsInfo *cors.CORSInfo
LPUrls, LCUrls []url.URL
Dir string `json:"data-dir"`
WalDir string `json:"wal-dir"`
MaxSnapFiles uint `json:"max-snapshots"`
MaxWalFiles uint `json:"max-wals"`
Name string `json:"name"`
SnapCount uint64 `json:"snapshot-count"`
AutoCompactionRetention int `json:"auto-compaction-retention"`
// TickMs is the number of milliseconds between heartbeat ticks.
// TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
// make ticks a cluster wide configuration.
TickMs uint `json:"heartbeat-interval"`
ElectionMs uint `json:"election-timeout"`
QuotaBackendBytes int64 `json:"quota-backend-bytes"`
MaxRequestBytes uint `json:"max-request-bytes"`
// gRPC server options
// GRPCKeepAliveMinTime is the minimum interval that a client should
// wait before pinging server. When client pings "too fast", server
// sends goaway and closes the connection (errors: too_many_pings,
// http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
// Server expects client pings only when there are active streams
// (PermitWithoutStream is set false).
GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
// GRPCKeepAliveInterval is the frequency of server-to-client ping
// to check if a connection is alive. Close a non-responsive connection
// after an additional duration of Timeout. 0 to disable.
GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
// GRPCKeepAliveTimeout is the additional duration of wait
// before closing a non-responsive connection. 0 to disable.
GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
// clustering
APUrls, ACUrls []url.URL
ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"`
Dproxy string `json:"discovery-proxy"`
Durl string `json:"discovery"`
InitialCluster string `json:"initial-cluster"`
InitialClusterToken string `json:"initial-cluster-token"`
StrictReconfigCheck bool `json:"strict-reconfig-check"`
EnableV2 bool `json:"enable-v2"`
// security
ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo
PeerAutoTLS bool
// debug
Debug bool `json:"debug"`
LogPkgLevels string `json:"log-package-levels"`
EnablePprof bool `json:"enable-pprof"`
Metrics string `json:"metrics"`
// ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"`
// UserHandlers is for registering user handlers and is only used for
// embedding etcd into other applications.
// The map key is the route path for the handler, and
// you must ensure it doesn't conflict with etcd's.
UserHandlers map[string]http.Handler `json:"-"`
// ServiceRegister is for registering users' gRPC services. A simple usage example:
// cfg := embed.NewConfig()
// cfg.ServiceRegister = func(s *grpc.Server) {
// pb.RegisterFooServer(s, &fooServer{})
// pb.RegisterBarServer(s, &barServer{})
// }
// embed.StartEtcd(cfg)
ServiceRegister func(*grpc.Server) `json:"-"`
// auth
AuthToken string `json:"auth-token"`
}
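The three keepalive knobs map directly onto gRPC's enforcement policy and server parameters (see the grpc.ServerOption wiring in etcd.go further down). A minimal sketch of tightening them on an embedded server, with made-up durations:

    cfg := embed.NewConfig()
    cfg.GRPCKeepAliveMinTime = 10 * time.Second // reject clients that ping faster than this
    cfg.GRPCKeepAliveInterval = 5 * time.Minute // server-to-client ping frequency
    cfg.GRPCKeepAliveTimeout = 30 * time.Second // grace period before closing a dead connection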
// configYAML holds the config suitable for yaml parsing
type configYAML struct {
Config
configJSON
}
// configJSON has file options that are translated into Config options
type configJSON struct {
LPUrlsJSON string `json:"listen-peer-urls"`
LCUrlsJSON string `json:"listen-client-urls"`
CorsJSON string `json:"cors"`
APUrlsJSON string `json:"initial-advertise-peer-urls"`
ACUrlsJSON string `json:"advertise-client-urls"`
ClientSecurityJSON securityConfig `json:"client-transport-security"`
PeerSecurityJSON securityConfig `json:"peer-transport-security"`
}
type securityConfig struct {
CAFile string `json:"ca-file"`
CertFile string `json:"cert-file"`
KeyFile string `json:"key-file"`
CertAuth bool `json:"client-cert-auth"`
TrustedCAFile string `json:"trusted-ca-file"`
AutoTLS bool `json:"auto-tls"`
}
// NewConfig creates a new Config populated with default values.
func NewConfig() *Config {
lpurl, _ := url.Parse(DefaultListenPeerURLs)
apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
lcurl, _ := url.Parse(DefaultListenClientURLs)
acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
cfg := &Config{
CorsInfo: &cors.CORSInfo{},
MaxSnapFiles: DefaultMaxSnapshots,
MaxWalFiles: DefaultMaxWALs,
Name: DefaultName,
SnapCount: etcdserver.DefaultSnapCount,
MaxRequestBytes: DefaultMaxRequestBytes,
GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
TickMs: 100,
ElectionMs: 1000,
LPUrls: []url.URL{*lpurl},
LCUrls: []url.URL{*lcurl},
APUrls: []url.URL{*apurl},
ACUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster",
StrictReconfigCheck: true,
Metrics: "basic",
EnableV2: true,
AuthToken: "simple",
}
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
return cfg
}
func ConfigFromFile(path string) (*Config, error) {
cfg := &configYAML{Config: *NewConfig()}
if err := cfg.configFromFile(path); err != nil {
return nil, err
}
return &cfg.Config, nil
}
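A short usage sketch for the file-based path (the file path is hypothetical; the keys follow the json tags on Config and configJSON above):

    // /etc/etcd/etcd.conf.yml might contain, for example:
    //   name: infra0
    //   data-dir: /var/lib/etcd
    //   listen-client-urls: http://10.0.0.5:2379
    //   advertise-client-urls: http://10.0.0.5:2379
    cfg, err := embed.ConfigFromFile("/etc/etcd/etcd.conf.yml")
    if err != nil {
        log.Fatalf("invalid config: %v", err)
    }
    e, err := embed.StartEtcd(cfg)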
func (cfg *configYAML) configFromFile(path string) error {
b, err := ioutil.ReadFile(path)
if err != nil {
return err
}
defaultInitialCluster := cfg.InitialCluster
err = yaml.Unmarshal(b, cfg)
if err != nil {
return err
}
if cfg.LPUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err)
}
cfg.LPUrls = []url.URL(u)
}
if cfg.LCUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up listen-client-urls: %v", err)
}
cfg.LCUrls = []url.URL(u)
}
if cfg.CorsJSON != "" {
if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil {
plog.Panicf("unexpected error setting up cors: %v", err)
}
}
if cfg.APUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err)
}
cfg.APUrls = []url.URL(u)
}
if cfg.ACUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err)
}
cfg.ACUrls = []url.URL(u)
}
// If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
cfg.InitialCluster = ""
}
if cfg.ClusterState == "" {
cfg.ClusterState = ClusterStateFlagNew
}
copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
tls.CAFile = ysc.CAFile
tls.CertFile = ysc.CertFile
tls.KeyFile = ysc.KeyFile
tls.ClientCertAuth = ysc.CertAuth
tls.TrustedCAFile = ysc.TrustedCAFile
}
copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
return cfg.Validate()
}
func (cfg *Config) Validate() error {
if err := checkBindURLs(cfg.LPUrls); err != nil {
return err
}
if err := checkBindURLs(cfg.LCUrls); err != nil {
return err
}
// Check if conflicting flags are passed.
nSet := 0
for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
if v {
nSet++
}
}
if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
}
if nSet > 1 {
return ErrConflictBootstrapFlags
}
if 5*cfg.TickMs > cfg.ElectionMs {
return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
}
if cfg.ElectionMs > maxElectionMs {
return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
}
// check this last since proxying in etcdmain may make this OK
if cfg.LCUrls != nil && cfg.ACUrls == nil {
return ErrUnsetAdvertiseClientURLsFlag
}
return nil
}
// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
token = cfg.InitialClusterToken
switch {
case cfg.Durl != "":
urlsmap = types.URLsMap{}
// If using discovery, generate a temporary cluster based on
// self's advertised peer URLs
urlsmap[cfg.Name] = cfg.APUrls
token = cfg.Durl
case cfg.DNSCluster != "":
clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls)
if cerr != nil {
plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
return nil, "", cerr
}
for _, s := range clusterStrs {
plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
}
clusterStr := strings.Join(clusterStrs, ",")
if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" {
cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
}
urlsmap, err = types.NewURLsMap(clusterStr)
// only etcd member must belong to the discovered cluster.
// proxy does not need to belong to the discovered cluster.
if which == "etcd" {
if _, ok := urlsmap[cfg.Name]; !ok {
return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
}
}
default:
// We're statically configured, and cluster has appropriately been set.
urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
}
return urlsmap, token, err
}
func (cfg Config) InitialClusterFromName(name string) (ret string) {
if len(cfg.APUrls) == 0 {
return ""
}
n := name
if name == "" {
n = DefaultName
}
for i := range cfg.APUrls {
ret = ret + "," + n + "=" + cfg.APUrls[i].String()
}
return ret[1:]
}
func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
func (cfg Config) defaultPeerHost() bool {
return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
}
func (cfg Config) defaultClientHost() bool {
return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
}
// UpdateDefaultClusterFromName updates the cluster advertise URLs with the default host, if available,
// when the advertise URLs are the default values (localhost:2379,2380) AND the listen URL is 0.0.0.0.
// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380
// then the advertise peer host would be updated with machine's default host,
// while keeping the listen URL's port.
// User can work around this by explicitly setting URL with 127.0.0.1.
// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
// TODO: check whether fields are set instead of whether fields have default value
func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
if defaultHostname == "" || defaultHostStatus != nil {
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
}
return "", defaultHostStatus
}
used := false
pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
if cfg.defaultPeerHost() && pip == "0.0.0.0" {
cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
used = true
}
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
}
cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
if cfg.defaultClientHost() && cip == "0.0.0.0" {
cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
used = true
}
dhost := defaultHostname
if !used {
dhost = ""
}
return dhost, defaultHostStatus
}
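A worked example of the rewrite above (the detected host value is hypothetical):

    cfg := embed.NewConfig()
    cfg.Name = "infra0"
    cfg.LPUrls[0] = url.URL{Scheme: "http", Host: "0.0.0.0:2380"}
    // With a detected default host of 10.0.0.5, after calling
    // cfg.UpdateDefaultClusterFromName(defaultInitialCluster),
    // cfg.APUrls[0] becomes http://10.0.0.5:2380 and cfg.InitialCluster
    // is regenerated as "infra0=http://10.0.0.5:2380".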
// checkBindURLs returns an error if any URL uses a domain name.
// TODO: return error in 3.2.0
func checkBindURLs(urls []url.URL) error {
for _, url := range urls {
if url.Scheme == "unix" || url.Scheme == "unixs" {
continue
}
host, _, err := net.SplitHostPort(url.Host)
if err != nil {
return err
}
if host == "localhost" {
// special case for local address
// TODO: support /etc/hosts ?
continue
}
if net.ParseIP(host) == nil {
return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
}
}
return nil
}

vendor/github.com/coreos/etcd/embed/doc.go generated vendored Normal file
View File

@@ -0,0 +1,45 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package embed provides bindings for embedding an etcd server in a program.
Launch an embedded etcd server using the configuration defaults:
import (
"log"
"time"
"github.com/coreos/etcd/embed"
)
func main() {
cfg := embed.NewConfig()
cfg.Dir = "default.etcd"
e, err := embed.StartEtcd(cfg)
if err != nil {
log.Fatal(err)
}
defer e.Close()
select {
case <-e.Server.ReadyNotify():
log.Printf("Server is ready!")
case <-time.After(60 * time.Second):
e.Server.Stop() // trigger a shutdown
log.Printf("Server took too long to start!")
}
log.Fatal(<-e.Err())
}
*/
package embed

vendor/github.com/coreos/etcd/embed/etcd.go generated vendored Normal file
View File

@@ -0,0 +1,453 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package embed
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
defaultLog "log"
"net"
"net/http"
"path/filepath"
"sync"
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/pkg/cors"
"github.com/coreos/etcd/pkg/debugutil"
runtimeutil "github.com/coreos/etcd/pkg/runtime"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/pkg/capnslog"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")
const (
// internal fd usage includes disk usage and transport usage.
// To read/write a snapshot, the snap pkg needs 1. In the normal case, the wal pkg needs
// at most 2 to read/lock/write WALs. One case where it needs 2 is to
// read all logs after some snapshot index, which sits at the end of
// the second-to-last WAL and the head of the last. For purging, it needs to read the
// directory, so it needs 1. For the fd monitor, it needs 1.
// For transport, rafthttp builds two long-polling connections and at most
// four temporary connections with each member. There are at most 9 members
// in a cluster, so it should reserve 96.
// For safety, we set the total reserved number to 150.
reservedInternalFDNum = 150
)
// Etcd contains a running etcd server and its listeners.
type Etcd struct {
Peers []*peerListener
Clients []net.Listener
Server *etcdserver.EtcdServer
cfg Config
stopc chan struct{}
errc chan error
sctxs map[string]*serveCtx
closeOnce sync.Once
}
type peerListener struct {
net.Listener
serve func() error
close func(context.Context) error
}
// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if err = inCfg.Validate(); err != nil {
return nil, err
}
serving := false
e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
cfg := &e.cfg
defer func() {
if e == nil || err == nil {
return
}
if !serving {
// errored before starting gRPC server for serveCtx.grpcServerC
for _, sctx := range e.sctxs {
close(sctx.grpcServerC)
}
}
e.Close()
e = nil
}()
if e.Peers, err = startPeerListeners(cfg); err != nil {
return
}
if e.sctxs, err = startClientListeners(cfg); err != nil {
return
}
for _, sctx := range e.sctxs {
e.Clients = append(e.Clients, sctx.l)
}
var (
urlsmap types.URLsMap
token string
)
if !isMemberInitialized(cfg) {
urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
if err != nil {
return e, fmt.Errorf("error setting up initial cluster: %v", err)
}
}
srvcfg := &etcdserver.ServerConfig{
Name: cfg.Name,
ClientURLs: cfg.ACUrls,
PeerURLs: cfg.APUrls,
DataDir: cfg.Dir,
DedicatedWALDir: cfg.WalDir,
SnapCount: cfg.SnapCount,
MaxSnapFiles: cfg.MaxSnapFiles,
MaxWALFiles: cfg.MaxWalFiles,
InitialPeerURLsMap: urlsmap,
InitialClusterToken: token,
DiscoveryURL: cfg.Durl,
DiscoveryProxy: cfg.Dproxy,
NewCluster: cfg.IsNewCluster(),
ForceNewCluster: cfg.ForceNewCluster,
PeerTLSInfo: cfg.PeerTLSInfo,
TickMs: cfg.TickMs,
ElectionTicks: cfg.ElectionTicks(),
AutoCompactionRetention: cfg.AutoCompactionRetention,
QuotaBackendBytes: cfg.QuotaBackendBytes,
MaxRequestBytes: cfg.MaxRequestBytes,
StrictReconfigCheck: cfg.StrictReconfigCheck,
ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
AuthToken: cfg.AuthToken,
}
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
return
}
// configure peer handlers after rafthttp.Transport started
ph := etcdhttp.NewPeerHandler(e.Server)
for _, p := range e.Peers {
srv := &http.Server{
Handler: ph,
ReadTimeout: 5 * time.Minute,
ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
}
l := p.Listener
p.serve = func() error { return srv.Serve(l) }
p.close = func(ctx context.Context) error {
// gracefully shutdown http.Server
// close open listeners, idle connections
// until context cancel or time-out
return srv.Shutdown(ctx)
}
}
// buffer channel so goroutines on closed connections won't wait forever
e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
e.Server.Start()
if err = e.serve(); err != nil {
return
}
serving = true
return
}
// Config returns the current configuration.
func (e *Etcd) Config() Config {
return e.cfg
}
func (e *Etcd) Close() {
e.closeOnce.Do(func() { close(e.stopc) })
timeout := 2 * time.Second
if e.Server != nil {
timeout = e.Server.Cfg.ReqTimeout()
}
for _, sctx := range e.sctxs {
for gs := range sctx.grpcServerC {
ch := make(chan struct{})
go func() {
defer close(ch)
// close listeners to stop accepting new connections,
// will block on any existing transports
gs.GracefulStop()
}()
// wait until all pending RPCs are finished
select {
case <-ch:
case <-time.After(timeout):
// took too long, manually close open transports
// e.g. watch streams
gs.Stop()
// concurrent GracefulStop should be interrupted
<-ch
}
}
}
for _, sctx := range e.sctxs {
sctx.cancel()
}
for i := range e.Clients {
if e.Clients[i] != nil {
e.Clients[i].Close()
}
}
// close rafthttp transports
if e.Server != nil {
e.Server.Stop()
}
// close all idle connections in peer handler (wait up to 1-second)
for i := range e.Peers {
if e.Peers[i] != nil && e.Peers[i].close != nil {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
e.Peers[i].close(ctx)
cancel()
}
}
}
func (e *Etcd) Err() <-chan error { return e.errc }
func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() {
phosts := make([]string, len(cfg.LPUrls))
for i, u := range cfg.LPUrls {
phosts[i] = u.Host
}
cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
if err != nil {
plog.Fatalf("could not get certs (%v)", err)
}
} else if cfg.PeerAutoTLS {
plog.Warningf("ignoring peer auto TLS since certs given")
}
if !cfg.PeerTLSInfo.Empty() {
plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
}
peers = make([]*peerListener, len(cfg.LPUrls))
defer func() {
if err == nil {
return
}
for i := range peers {
if peers[i] != nil && peers[i].close != nil {
plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
peers[i].close(context.Background())
}
}
}()
for i, u := range cfg.LPUrls {
if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() {
plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
}
if cfg.PeerTLSInfo.ClientCertAuth {
plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
}
}
peers[i] = &peerListener{close: func(context.Context) error { return nil }}
peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
if err != nil {
return nil, err
}
// once serve, overwrite with 'http.Server.Shutdown'
peers[i].close = func(context.Context) error {
return peers[i].Listener.Close()
}
plog.Info("listening for peers on ", u.String())
}
return peers, nil
}
func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() {
chosts := make([]string, len(cfg.LCUrls))
for i, u := range cfg.LCUrls {
chosts[i] = u.Host
}
cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
if err != nil {
plog.Fatalf("could not get certs (%v)", err)
}
} else if cfg.ClientAutoTLS {
plog.Warningf("ignoring client auto TLS since certs given")
}
if cfg.EnablePprof {
plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
}
sctxs = make(map[string]*serveCtx)
for _, u := range cfg.LCUrls {
sctx := newServeCtx()
if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() {
plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
}
if cfg.ClientTLSInfo.ClientCertAuth {
plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
}
}
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
}
proto := "tcp"
addr := u.Host
if u.Scheme == "unix" || u.Scheme == "unixs" {
proto = "unix"
addr = u.Host + u.Path
}
sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
sctx.insecure = !sctx.secure
if oldctx := sctxs[addr]; oldctx != nil {
oldctx.secure = oldctx.secure || sctx.secure
oldctx.insecure = oldctx.insecure || sctx.insecure
continue
}
if sctx.l, err = net.Listen(proto, addr); err != nil {
return nil, err
}
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user.
sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum {
plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
}
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
}
if proto == "tcp" {
if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
return nil, err
}
}
plog.Info("listening for client requests on ", u.Host)
defer func() {
if err != nil {
sctx.l.Close()
plog.Info("stopping listening for client requests on ", u.Host)
}
}()
for k := range cfg.UserHandlers {
sctx.userHandlers[k] = cfg.UserHandlers[k]
}
sctx.serviceRegister = cfg.ServiceRegister
if cfg.EnablePprof || cfg.Debug {
sctx.registerPprof()
}
if cfg.Debug {
sctx.registerTrace()
}
sctxs[addr] = sctx
}
return sctxs, nil
}
func (e *Etcd) serve() (err error) {
var ctlscfg *tls.Config
if !e.cfg.ClientTLSInfo.Empty() {
plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil {
return err
}
}
if e.cfg.CorsInfo.String() != "" {
plog.Infof("cors = %s", e.cfg.CorsInfo)
}
// Start the peer server in a goroutine
for _, pl := range e.Peers {
go func(l *peerListener) {
e.errHandler(l.serve())
}(pl)
}
// Start a client server goroutine for each listen address
var h http.Handler
if e.Config().EnableV2 {
h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
} else {
mux := http.NewServeMux()
etcdhttp.HandleBasic(mux, e.Server)
h = mux
}
h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})
gopts := []grpc.ServerOption{}
if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: e.cfg.GRPCKeepAliveMinTime,
PermitWithoutStream: false,
}))
}
if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
Time: e.cfg.GRPCKeepAliveInterval,
Timeout: e.cfg.GRPCKeepAliveTimeout,
}))
}
for _, sctx := range e.sctxs {
go func(s *serveCtx) {
e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...))
}(sctx)
}
return nil
}
func (e *Etcd) errHandler(err error) {
select {
case <-e.stopc:
return
default:
}
select {
case <-e.stopc:
case e.errc <- err:
}
}

vendor/github.com/coreos/etcd/embed/serve.go generated vendored Normal file
View File

@@ -0,0 +1,236 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package embed
import (
"crypto/tls"
"io/ioutil"
defaultLog "log"
"net"
"net/http"
"strings"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3client"
"github.com/coreos/etcd/etcdserver/api/v3election"
"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
"github.com/coreos/etcd/etcdserver/api/v3lock"
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw"
"github.com/coreos/etcd/pkg/debugutil"
"github.com/cockroachdb/cmux"
gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type serveCtx struct {
l net.Listener
addr string
secure bool
insecure bool
ctx context.Context
cancel context.CancelFunc
userHandlers map[string]http.Handler
serviceRegister func(*grpc.Server)
grpcServerC chan *grpc.Server
}
func newServeCtx() *serveCtx {
ctx, cancel := context.WithCancel(context.Background())
return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler),
grpcServerC: make(chan *grpc.Server, 2), // in case both sctx.insecure and sctx.secure are true
}
}
// serve accepts incoming connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
func (sctx *serveCtx) serve(
s *etcdserver.EtcdServer,
tlscfg *tls.Config,
handler http.Handler,
errHandler func(error),
gopts ...grpc.ServerOption) error {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify()
plog.Info("ready to serve client requests")
m := cmux.New(sctx.l)
v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c)
if sctx.insecure {
gs := v3rpc.Server(s, nil, gopts...)
sctx.grpcServerC <- gs
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
grpcl := m.Match(cmux.HTTP2())
go func() { errHandler(gs.Serve(grpcl)) }()
opts := []grpc.DialOption{
grpc.WithInsecure(),
}
gwmux, err := sctx.registerGateway(opts)
if err != nil {
return err
}
httpmux := sctx.createMux(gwmux, handler)
srvhttp := &http.Server{
Handler: httpmux,
ErrorLog: logger, // do not log user error
}
httpl := m.Match(cmux.HTTP1())
go func() { errHandler(srvhttp.Serve(httpl)) }()
plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
}
if sctx.secure {
gs := v3rpc.Server(s, tlscfg, gopts...)
sctx.grpcServerC <- gs
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
handler = grpcHandlerFunc(gs, handler)
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
creds := credentials.NewTLS(dtls)
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
gwmux, err := sctx.registerGateway(opts)
if err != nil {
return err
}
tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg)
// TODO: add debug flag; enable logging when debug flag is set
httpmux := sctx.createMux(gwmux, handler)
srv := &http.Server{
Handler: httpmux,
TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error
}
go func() { errHandler(srv.Serve(tlsl)) }()
plog.Infof("serving client requests on %s", sctx.l.Addr().String())
}
close(sctx.grpcServerC)
return m.Serve()
}
// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
// connections or otherHandler otherwise. Copied from cockroachdb.
func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
if otherHandler == nil {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
grpcServer.ServeHTTP(w, r)
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
grpcServer.ServeHTTP(w, r)
} else {
otherHandler.ServeHTTP(w, r)
}
})
}
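grpcHandlerFunc is what lets one TLS listener carry both the gRPC API and plain HTTP traffic. A hedged sketch of standalone use (the handler and variable names are illustrative):

    mux := http.NewServeMux()
    mux.Handle("/v3alpha/", gwmux) // grpc-gateway REST-to-gRPC translation
    srv := &http.Server{
        Handler:   grpcHandlerFunc(gs, mux), // gRPC when HTTP/2 + application/grpc, else mux
        TLSConfig: tlscfg,
    }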
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
ctx := sctx.ctx
conn, err := grpc.DialContext(ctx, sctx.addr, opts...)
if err != nil {
return nil, err
}
gwmux := gw.NewServeMux()
handlers := []registerHandlerFunc{
etcdservergw.RegisterKVHandler,
etcdservergw.RegisterWatchHandler,
etcdservergw.RegisterLeaseHandler,
etcdservergw.RegisterClusterHandler,
etcdservergw.RegisterMaintenanceHandler,
etcdservergw.RegisterAuthHandler,
v3lockgw.RegisterLockHandler,
v3electiongw.RegisterElectionHandler,
}
for _, h := range handlers {
if err := h(ctx, gwmux, conn); err != nil {
return nil, err
}
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
}
}()
return gwmux, nil
}
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers {
httpmux.Handle(path, h)
}
httpmux.Handle("/v3alpha/", gwmux)
if handler != nil {
httpmux.Handle("/", handler)
}
return httpmux
}
func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
if sctx.userHandlers[s] != nil {
plog.Warningf("path %s already registered by user handler", s)
return
}
sctx.userHandlers[s] = h
}
func (sctx *serveCtx) registerPprof() {
for p, h := range debugutil.PProfHandlers() {
sctx.registerUserHandler(p, h)
}
}
func (sctx *serveCtx) registerTrace() {
reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
}

vendor/github.com/coreos/etcd/embed/util.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package embed
import (
"path/filepath"
"github.com/coreos/etcd/wal"
)
func isMemberInitialized(cfg *Config) bool {
waldir := cfg.WalDir
if waldir == "" {
waldir = filepath.Join(cfg.Dir, "member", "wal")
}
return wal.Exist(waldir)
}

View File

@@ -154,9 +154,10 @@ func (e Error) StatusCode() int {
return status
}
func (e Error) WriteTo(w http.ResponseWriter) error {
w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(e.StatusCode())
fmt.Fprintln(w, e.toJsonString())
_, err := w.Write([]byte(e.toJsonString() + "\n"))
return err
}

View File

@@ -6,6 +6,7 @@ go_library(
"apply.go",
"apply_auth.go",
"apply_v2.go",
"backend.go",
"cluster_util.go",
"config.go",
"consistent_index.go",
@@ -40,7 +41,6 @@ go_library(
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/contention:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library",

View File

@@ -29,7 +29,11 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3client:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:all-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:all-srcs",
],
tags = ["automanaged"],

View File

@@ -33,11 +33,10 @@ var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
// capabilityMaps is a static map of version to capability map.
// The base capabilities are the set that the 2.0 release supports.
capabilityMaps = map[string]map[Capability]bool{
"2.3.0": {AuthCapability: true},
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
"3.1.0": {AuthCapability: true, V3rpcCapability: true},
"3.2.0": {AuthCapability: true, V3rpcCapability: true},
}
enableMapMu sync.RWMutex
@@ -48,7 +47,10 @@ var (
)
func init() {
enabledMap = make(map[Capability]bool)
enabledMap = map[Capability]bool{
AuthCapability: true,
V3rpcCapability: true,
}
}
// UpdateCapability updates the enabledMap when the cluster version increases.

View File

@@ -0,0 +1,40 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"peer.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/etcdhttp",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/error:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library",
"//vendor/github.com/coreos/etcd/raft:go_default_library",
"//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
"//vendor/github.com/coreos/etcd/version:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,186 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"encoding/json"
"expvar"
"fmt"
"net/http"
"strings"
"time"
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/version"
"github.com/coreos/pkg/capnslog"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
mlog = logutil.NewMergeLogger(plog)
)
const (
configPath = "/config"
metricsPath = "/metrics"
healthPath = "/health"
varsPath = "/debug/vars"
versionPath = "/version"
)
// HandleBasic adds handlers to a mux for serving JSON etcd client requests
// that do not access the v2 store.
func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) {
mux.HandleFunc(varsPath, serveVars)
mux.HandleFunc(configPath+"/local/log", logHandleFunc)
mux.Handle(metricsPath, prometheus.Handler())
mux.Handle(healthPath, healthHandler(server))
mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
}
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "GET") {
return
}
if uint64(server.Leader()) == raft.None {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
}
}
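On the client side the contract is simple: 200 with {"health": "true"} only when the member sees a leader and a quorum read succeeds, 503 otherwise. A hedged snippet, assuming a member serving client HTTP at a hypothetical baseURL such as "http://127.0.0.1:2379":

	resp, err := http.Get(baseURL + healthPath)
	if err != nil {
		// handle error!
	}
	defer resp.Body.Close()
	fmt.Println("healthy:", resp.StatusCode == http.StatusOK)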
func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
v := c.Version()
if v != nil {
fn(w, r, v.String())
} else {
fn(w, r, "not_decided")
}
}
}
func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
if !allowMethod(w, r, "GET") {
return
}
vs := version.Versions{
Server: version.Version,
Cluster: clusterV,
}
w.Header().Set("Content-Type", "application/json")
b, err := json.Marshal(&vs)
if err != nil {
plog.Panicf("cannot marshal versions to json (%v)", err)
}
w.Write(b)
}
func logHandleFunc(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "PUT") {
return
}
in := struct{ Level string }{}
d := json.NewDecoder(r.Body)
if err := d.Decode(&in); err != nil {
WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
return
}
logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
if err != nil {
WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
return
}
plog.Noticef("globalLogLevel set to %q", logl.String())
capnslog.SetGlobalLogLevel(logl)
w.WriteHeader(http.StatusNoContent)
}
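Driving this endpoint only needs a PUT with a small JSON body; the level names are capnslog's (e.g. DEBUG, INFO, NOTICE). A sketch against the same hypothetical baseURL as above:

	req, err := http.NewRequest("PUT", baseURL+configPath+"/local/log",
		strings.NewReader(`{"Level": "DEBUG"}`))
	if err != nil {
		// handle error!
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// handle error!
	}
	resp.Body.Close()
	// 204 No Content on success; malformed JSON or an unknown level yields 400.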
func serveVars(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "GET") {
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
if m == r.Method {
return true
}
w.Header().Set("Allow", m)
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return false
}
// WriteError logs and writes the given Error to the ResponseWriter.
// If Error is an etcdErr, it is rendered to the ResponseWriter;
// otherwise, it is assumed to be a StatusInternalServerError.
func WriteError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
switch e := err.(type) {
case *etcdErr.Error:
e.WriteTo(w)
case *httptypes.HTTPError:
if et := e.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
mlog.MergeError(err)
default:
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
}
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package v2http
package etcdhttp
import (
"encoding/json"
@@ -61,7 +61,7 @@ type peerMembersHandler struct {
}
func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
if !allowMethod(w, r, "GET") {
return
}
w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

View File

@@ -9,7 +9,6 @@ go_library(
"doc.go",
"http.go",
"metrics.go",
"peer.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/v2http",
visibility = ["//visibility:public"],
@@ -17,18 +16,15 @@ go_library(
"//vendor/github.com/coreos/etcd/error:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library",
"//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
"//vendor/github.com/coreos/etcd/raft:go_default_library",
"//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
"//vendor/github.com/coreos/etcd/store:go_default_library",
"//vendor/github.com/coreos/etcd/version:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/jonboulle/clockwork:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",

View File

@@ -17,7 +17,6 @@ package v2http
import (
"encoding/json"
"errors"
"expvar"
"fmt"
"io/ioutil"
"net/http"
@@ -30,38 +29,36 @@ import (
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
)
const (
authPrefix = "/v2/auth"
keysPrefix = "/v2/keys"
deprecatedMachinesPrefix = "/v2/machines"
membersPrefix = "/v2/members"
statsPrefix = "/v2/stats"
varsPath = "/debug/vars"
metricsPath = "/metrics"
healthPath = "/health"
versionPath = "/version"
configPath = "/config"
authPrefix = "/v2/auth"
keysPrefix = "/v2/keys"
machinesPrefix = "/v2/machines"
membersPrefix = "/v2/members"
statsPrefix = "/v2/stats"
)
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
sec := auth.NewStore(server, timeout)
mux := http.NewServeMux()
etcdhttp.HandleBasic(mux, server)
handleV2(mux, server, timeout)
return requestLogger(mux)
}
func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) {
sec := auth.NewStore(server, timeout)
kh := &keysHandler{
sec: sec,
server: server,
@@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
dmh := &deprecatedMachinesHandler{
cluster: server.Cluster(),
}
mah := &machinesHandler{cluster: server.Cluster()}
sech := &authHandler{
sec: sec,
cluster: server.Cluster(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
mux := http.NewServeMux()
mux.HandleFunc("/", http.NotFound)
mux.Handle(healthPath, healthHandler(server))
mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
mux.Handle(keysPrefix, kh)
mux.Handle(keysPrefix+"/", kh)
mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
mux.HandleFunc(varsPath, serveVars)
mux.HandleFunc(configPath+"/local/log", logHandleFunc)
mux.Handle(metricsPath, prometheus.Handler())
mux.Handle(membersPrefix, mh)
mux.Handle(membersPrefix+"/", mh)
mux.Handle(deprecatedMachinesPrefix, dmh)
mux.Handle(machinesPrefix, mah)
handleAuth(mux, sech)
return requestLogger(mux)
}
type keysHandler struct {
@@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
type deprecatedMachinesHandler struct {
type machinesHandler struct {
cluster api.Cluster
}
func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET", "HEAD") {
return
}
@@ -234,7 +220,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
now := h.clock.Now()
m := membership.NewMember("", req.PeerURLs, "", &now)
err := h.server.AddMember(ctx, *m)
_, err := h.server.AddMember(ctx, *m)
switch {
case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
@@ -255,7 +241,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !ok {
return
}
err := h.server.RemoveMember(ctx, uint64(id))
_, err := h.server.RemoveMember(ctx, uint64(id))
switch {
case err == membership.ErrIDRemoved:
writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
@@ -280,7 +266,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ID: id,
RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
}
err := h.server.UpdateMember(ctx, m)
_, err := h.server.UpdateMember(ctx, m)
switch {
case err == membership.ErrPeerURLexists:
writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
@@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
}
stats := h.stats.LeaderStats()
if stats == nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(stats)
}
func serveVars(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
if uint64(server.Leader()) == raft.None {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
}
}
func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
v := c.Version()
if v != nil {
fn(w, r, v.String())
} else {
fn(w, r, "not_decided")
}
}
}
func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
if !allowMethod(w, r.Method, "GET") {
return
}
vs := version.Versions{
Server: version.Version,
Cluster: clusterV,
}
w.Header().Set("Content-Type", "application/json")
b, err := json.Marshal(&vs)
if err != nil {
plog.Panicf("cannot marshal versions to json (%v)", err)
}
w.Write(b)
}
func logHandleFunc(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "PUT") {
return
}
in := struct{ Level string }{}
d := json.NewDecoder(r.Body)
if err := d.Decode(&in); err != nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
return
}
logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
if err != nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
return
}
plog.Noticef("globalLogLevel set to %q", logl.String())
capnslog.SetGlobalLogLevel(logl)
w.WriteHeader(http.StatusNoContent)
}
// parseKeyRequest converts a received http.Request on keysPrefix to
// a server Request, performing validation of supplied fields as appropriate.
// If any validation fails, an empty Request and a non-nil error are returned.

View File

@@ -20,12 +20,11 @@ import (
"strings"
"time"
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/pkg/capnslog"
)
@@ -39,37 +38,18 @@ var (
mlog = logutil.NewMergeLogger(plog)
)
// writeError logs and writes the given Error to the ResponseWriter.
// If Error is an etcdErr, it is rendered to the ResponseWriter;
// otherwise, it is assumed to be a StatusInternalServerError.
func writeError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
switch e := err.(type) {
case *etcdErr.Error:
e.WriteTo(w)
case *httptypes.HTTPError:
if et := e.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
case auth.Error:
if e, ok := err.(auth.Error); ok {
herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
mlog.MergeError(err)
default:
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
return
}
etcdhttp.WriteError(w, r, err)
}
// allowMethod verifies that the given method is one of the allowed methods,

View File

@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"v3client.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/v3client",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
"//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,45 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v3client provides clientv3 interfaces from an etcdserver.
//
// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
//
// import (
// "context"
//
// "github.com/coreos/etcd/embed"
// "github.com/coreos/etcd/etcdserver/api/v3client"
// )
//
// ...
//
// // create an embedded EtcdServer from the default configuration
// cfg := embed.NewConfig()
// cfg.Dir = "default.etcd"
// e, err := embed.StartEtcd(cfg)
// if err != nil {
// // handle error!
// }
//
// // wrap the EtcdServer with v3client
// cli := v3client.New(e.Server)
//
// // use like an ordinary clientv3
// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
// if err != nil {
// // handle error!
// }
//
package v3client

View File

@@ -0,0 +1,67 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3client
import (
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
"github.com/coreos/etcd/proxy/grpcproxy/adapter"
"golang.org/x/net/context"
)
// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
// of making gRPC calls through sockets, the client makes direct function calls
// to the etcd server through its api/v3rpc function interfaces.
func New(s *etcdserver.EtcdServer) *clientv3.Client {
c := clientv3.NewCtxClient(context.Background())
kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
c.KV = clientv3.NewKVFromKVClient(kvc)
lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second)
wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)}
mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc)
clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
c.Cluster = clientv3.NewClusterFromClusterClient(clc)
// TODO: implement clientv3.Auth interface?
return c
}
// blankContext implements Stringer on a context so the ctx string doesn't
// depend on the context's WithValue data, which tends to be unsynchronized
// (e.g., x/net/trace), causing ctx.String() to trigger data races.
type blankContext struct{ context.Context }
func (*blankContext) String() string { return "(blankCtx)" }
// watchWrapper wraps clientv3 watch calls to blank out the context
// to avoid races on trace data.
type watchWrapper struct{ clientv3.Watcher }
func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
}
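Extending the package example in doc.go, a short watch sketch (assuming an embedded server e started as there, and an fmt import); any trace rendering sees only "(blankCtx)" instead of racing on the caller's context values:

	cli := v3client.New(e.Server)
	wch := cli.Watch(context.TODO(), "some-key")
	if _, err := cli.Put(context.TODO(), "some-key", "it works!"); err != nil {
		// handle error!
	}
	for wresp := range wch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
		break // one event is enough for this sketch
	}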

View File

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"election.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/v3election",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v3election provides a v3 election service from an etcdserver.
package v3election

View File

@@ -0,0 +1,123 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3election
import (
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
)
type electionServer struct {
c *clientv3.Client
}
func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
return &electionServer{c}
}
func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
s, err := es.session(ctx, req.Lease)
if err != nil {
return nil, err
}
e := concurrency.NewElection(s, string(req.Name))
if err = e.Campaign(ctx, string(req.Value)); err != nil {
return nil, err
}
return &epb.CampaignResponse{
Header: e.Header(),
Leader: &epb.LeaderKey{
Name: req.Name,
Key: []byte(e.Key()),
Rev: e.Rev(),
Lease: int64(s.Lease()),
},
}, nil
}
func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
s, err := es.session(ctx, req.Leader.Lease)
if err != nil {
return nil, err
}
e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
if err := e.Proclaim(ctx, string(req.Value)); err != nil {
return nil, err
}
return &epb.ProclaimResponse{Header: e.Header()}, nil
}
func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
s, err := es.session(stream.Context(), -1)
if err != nil {
return err
}
e := concurrency.NewElection(s, string(req.Name))
ch := e.Observe(stream.Context())
for stream.Context().Err() == nil {
select {
case <-stream.Context().Done():
case resp, ok := <-ch:
if !ok {
return nil
}
lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
if err := stream.Send(lresp); err != nil {
return err
}
}
}
return stream.Context().Err()
}
func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
s, err := es.session(ctx, -1)
if err != nil {
return nil, err
}
l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
if lerr != nil {
return nil, lerr
}
return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
}
func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
s, err := es.session(ctx, req.Leader.Lease)
if err != nil {
return nil, err
}
e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
if err := e.Resign(ctx); err != nil {
return nil, err
}
return &epb.ResignResponse{Header: e.Header()}, nil
}
func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
s, err := concurrency.NewSession(
es.c,
concurrency.WithLease(clientv3.LeaseID(lease)),
concurrency.WithContext(ctx),
)
if err != nil {
return nil, err
}
s.Orphan()
return s, nil
}

View File

@@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
filegroup(
name = "go_default_library_protos",
srcs = ["v3election.proto"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["v3election.pb.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["v3election.pb.gw.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,313 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: etcdserver/api/v3election/v3electionpb/v3election.proto
/*
Package v3electionpb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package gw
import (
"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3electionpb.CampaignRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3electionpb.ProclaimRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3electionpb.LeaderRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
var protoReq v3electionpb.LeaderRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
stream, err := client.Observe(ctx, &protoReq)
if err != nil {
return nil, metadata, err
}
header, err := stream.Header()
if err != nil {
return nil, metadata, err
}
metadata.HeaderMD = header
return stream, metadata, nil
}
func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3electionpb.ResignRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterElectionHandlerFromEndpoint is the same as RegisterElectionHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterElectionHandler(ctx, mux, conn)
}
// RegisterElectionHandler registers the http handlers for service Election to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
}
// RegisterElectionHandlerClient registers the http handlers for service Election to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ElectionClient" to call the correct interceptors.
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, ""))
pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, ""))
pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, ""))
pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, ""))
pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, ""))
)
var (
forward_Election_Campaign_0 = runtime.ForwardResponseMessage
forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
forward_Election_Leader_0 = runtime.ForwardResponseMessage
forward_Election_Observe_0 = runtime.ForwardResponseStream
forward_Election_Resign_0 = runtime.ForwardResponseMessage
)

File diff suppressed because it is too large

View File

@@ -0,0 +1,119 @@
syntax = "proto3";
package v3electionpb;
import "gogoproto/gogo.proto";
import "etcd/etcdserver/etcdserverpb/rpc.proto";
import "etcd/mvcc/mvccpb/kv.proto";
// for grpc-gateway
import "google/api/annotations.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// The election service exposes client-side election facilities as a gRPC interface.
service Election {
// Campaign waits to acquire leadership in an election, returning a LeaderKey
// representing the leadership if successful. The LeaderKey can then be used
// to issue new values on the election, transactionally guard API requests on
// leadership still being held, and resign from the election.
rpc Campaign(CampaignRequest) returns (CampaignResponse) {
option (google.api.http) = {
post: "/v3alpha/election/campaign"
body: "*"
};
}
// Proclaim updates the leader's posted value with a new value.
rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
option (google.api.http) = {
post: "/v3alpha/election/proclaim"
body: "*"
};
}
// Leader returns the current election proclamation, if any.
rpc Leader(LeaderRequest) returns (LeaderResponse) {
option (google.api.http) = {
post: "/v3alpha/election/leader"
body: "*"
};
}
// Observe streams election proclamations in-order as made by the election's
// elected leaders.
rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
option (google.api.http) = {
post: "/v3alpha/election/observe"
body: "*"
};
}
// Resign releases election leadership so other campaigners may acquire
// leadership on the election.
rpc Resign(ResignRequest) returns (ResignResponse) {
option (google.api.http) = {
post: "/v3alpha/election/resign"
body: "*"
};
}
}
message CampaignRequest {
// name is the election's identifier for the campaign.
bytes name = 1;
// lease is the ID of the lease attached to leadership of the election. If the
// lease expires or is revoked before resigning leadership, then the
// leadership is transferred to the next campaigner, if any.
int64 lease = 2;
// value is the initial proclaimed value set when the campaigner wins the
// election.
bytes value = 3;
}
message CampaignResponse {
etcdserverpb.ResponseHeader header = 1;
// leader describes the resources used for holding leadership of the election.
LeaderKey leader = 2;
}
message LeaderKey {
// name is the election identifier that corresponds to the leadership key.
bytes name = 1;
// key is an opaque key representing the ownership of the election. If the key
// is deleted, then leadership is lost.
bytes key = 2;
// rev is the creation revision of the key. It can be used to test for ownership
// of an election during transactions by testing that the key's creation revision
// matches rev.
int64 rev = 3;
// lease is the lease ID of the election leader.
int64 lease = 4;
}
message LeaderRequest {
// name is the election identifier for the leadership information.
bytes name = 1;
}
message LeaderResponse {
etcdserverpb.ResponseHeader header = 1;
// kv is the key-value pair representing the latest leader update.
mvccpb.KeyValue kv = 2;
}
message ResignRequest {
// leader is the leadership to relinquish by resignation.
LeaderKey leader = 1;
}
message ResignResponse {
etcdserverpb.ResponseHeader header = 1;
}
message ProclaimRequest {
// leader is the leadership hold on the election.
LeaderKey leader = 1;
// value is an update meant to overwrite the leader's current value.
bytes value = 2;
}
message ProclaimResponse {
etcdserverpb.ResponseHeader header = 1;
}
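Because grpc-gateway maps proto3 bytes fields to base64 strings in JSON, calling these endpoints over HTTP means encoding name and value first. A sketch in Go, assuming the gateway is mounted at a hypothetical http://127.0.0.1:2379/v3alpha (the election name and value are illustrative):

	name := base64.StdEncoding.EncodeToString([]byte("my-election"))
	value := base64.StdEncoding.EncodeToString([]byte("v1"))
	body := fmt.Sprintf(`{"name": %q, "value": %q}`, name, value)
	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/election/campaign",
		"application/json", strings.NewReader(body))
	if err != nil {
		// handle error!
	}
	defer resp.Body.Close()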

View File

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"lock.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v3lock provides a v3 locking service from an etcdserver.
package v3lock

View File

@@ -0,0 +1,56 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3lock
import (
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
)
type lockServer struct {
c *clientv3.Client
}
func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
return &lockServer{c}
}
func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
s, err := concurrency.NewSession(
ls.c,
concurrency.WithLease(clientv3.LeaseID(req.Lease)),
concurrency.WithContext(ctx),
)
if err != nil {
return nil, err
}
s.Orphan()
m := concurrency.NewMutex(s, string(req.Name))
if err = m.Lock(ctx); err != nil {
return nil, err
}
return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
}
func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
resp, err := ls.c.Delete(ctx, string(req.Key))
if err != nil {
return nil, err
}
return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
}
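A minimal gRPC sketch of the acquire/release cycle above, assuming an established *grpc.ClientConn conn and a context.Context ctx (the lock name is illustrative):

	lc := v3lockpb.NewLockClient(conn)
	lresp, err := lc.Lock(ctx, &v3lockpb.LockRequest{Name: []byte("my-lock")})
	if err != nil {
		// handle error!
	}
	// ... critical section; lresp.Key is the ownership key granted by Lock ...
	if _, err := lc.Unlock(ctx, &v3lockpb.UnlockRequest{Key: lresp.Key}); err != nil {
		// handle error!
	}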

View File

@@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
filegroup(
name = "go_default_library_protos",
srcs = ["v3lock.proto"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["v3lock.pb.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["v3lock.pb.gw.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,167 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto
/*
Package v3lockpb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package gw
import (
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3lockpb.LockRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3lockpb.UnlockRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterLockHandlerFromEndpoint is the same as RegisterLockHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterLockHandler(ctx, mux, conn)
}
// RegisterLockHandler registers the http handlers for service Lock to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
}
// RegisterLockHandlerClient registers the http handlers for service Lock to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LockClient" to call the correct interceptors.
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, ""))
pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, ""))
)
var (
forward_Lock_Lock_0 = runtime.ForwardResponseMessage
forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
)
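As an aside, a minimal sketch of wiring this generated gateway into a plain HTTP server; the gateway import path, listen address, and etcd endpoint below are illustrative assumptions, not part of this commit.
package main
import (
	"net/http"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	gw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" // assumed location of this generated file
)
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	mux := runtime.NewServeMux()
	// RegisterLockHandlerFromEndpoint dials etcd and closes the conn when ctx is done.
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := gw.RegisterLockHandlerFromEndpoint(ctx, mux, "127.0.0.1:2379", opts); err != nil {
		panic(err)
	}
	http.ListenAndServe(":8080", mux) // error ignored for brevity
}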

View File

@@ -0,0 +1,978 @@
// Code generated by protoc-gen-gogo.
// source: v3lock.proto
// DO NOT EDIT!
/*
Package v3lockpb is a generated protocol buffer package.
It is generated from these files:
v3lock.proto
It has these top-level messages:
LockRequest
LockResponse
UnlockRequest
UnlockResponse
*/
package v3lockpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
_ "google.golang.org/genproto/googleapis/api/annotations"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
io "io"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type LockRequest struct {
// name is the identifier for the distributed shared lock to be acquired.
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// lease is the ID of the lease that will be attached to ownership of the
// lock. If the lease expires or is revoked while it holds the lock,
// the lock is automatically released. Calls to Lock with the same lease will
// be treated as a single acquisition; locking twice with the same lease is a
// no-op.
Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
}
func (m *LockRequest) Reset() { *m = LockRequest{} }
func (m *LockRequest) String() string { return proto.CompactTextString(m) }
func (*LockRequest) ProtoMessage() {}
func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} }
func (m *LockRequest) GetName() []byte {
if m != nil {
return m.Name
}
return nil
}
func (m *LockRequest) GetLease() int64 {
if m != nil {
return m.Lease
}
return 0
}
type LockResponse struct {
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
// key is a key that will exist on etcd for the duration that the Lock caller
// owns the lock. Users should not modify this key or the lock may exhibit
// undefined behavior.
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
}
func (m *LockResponse) Reset() { *m = LockResponse{} }
func (m *LockResponse) String() string { return proto.CompactTextString(m) }
func (*LockResponse) ProtoMessage() {}
func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} }
func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func (m *LockResponse) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
type UnlockRequest struct {
// key is the lock ownership key granted by Lock.
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
}
func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
func (*UnlockRequest) ProtoMessage() {}
func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} }
func (m *UnlockRequest) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
type UnlockResponse struct {
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
}
func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
func (*UnlockResponse) ProtoMessage() {}
func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} }
func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
if m != nil {
return m.Header
}
return nil
}
func init() {
proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Lock service
type LockClient interface {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
// Unlock takes a key returned by Lock and releases the hold on the lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
}
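A hedged usage sketch of the client interface above; the connection setup, error handling, and the lease ID (leaseID, obtained from the Lease service) are illustrative assumptions.
conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
if err != nil {
	// handle dial error
}
defer conn.Close()
lc := NewLockClient(conn)
// Acquire; the returned Key is the ownership key that Unlock expects.
lresp, err := lc.Lock(context.Background(), &LockRequest{Name: []byte("my-lock"), Lease: leaseID})
if err != nil {
	// handle acquire error
}
// ... do work while holding the lock ...
_, err = lc.Unlock(context.Background(), &UnlockRequest{Key: lresp.Key})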
type lockClient struct {
cc *grpc.ClientConn
}
func NewLockClient(cc *grpc.ClientConn) LockClient {
return &lockClient{cc}
}
func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
out := new(LockResponse)
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
out := new(UnlockResponse)
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Lock service
type LockServer interface {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
Lock(context.Context, *LockRequest) (*LockResponse, error)
// Unlock takes a key returned by Lock and releases the hold on the lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
}
func RegisterLockServer(s *grpc.Server, srv LockServer) {
s.RegisterService(&_Lock_serviceDesc, srv)
}
func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LockRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LockServer).Lock(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v3lockpb.Lock/Lock",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LockServer).Lock(ctx, req.(*LockRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnlockRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LockServer).Unlock(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v3lockpb.Lock/Unlock",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Lock_serviceDesc = grpc.ServiceDesc{
ServiceName: "v3lockpb.Lock",
HandlerType: (*LockServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Lock",
Handler: _Lock_Lock_Handler,
},
{
MethodName: "Unlock",
Handler: _Lock_Unlock_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "v3lock.proto",
}
func (m *LockRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.Lease != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
}
return i, nil
}
func (m *LockResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
n1, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
return i, nil
}
func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
return i, nil
}
func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
n2, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
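A worked example of the varint layout above (illustrative): each byte carries seven bits, lowest group first, with the high bit as a continuation flag.
buf := make([]byte, 2)
n := encodeVarintV3Lock(buf, 0, 300) // 300 = 0b10_0101100
// buf is now {0xAC, 0x02}: 0x2C (low seven bits) | 0x80 continuation, then 0x02; n == 2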
func (m *LockRequest) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
if m.Lease != 0 {
n += 1 + sovV3Lock(uint64(m.Lease))
}
return n
}
func (m *LockResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovV3Lock(uint64(l))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}
func (m *UnlockRequest) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}
func (m *UnlockResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}
func sovV3Lock(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozV3Lock(x uint64) (n int) {
return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
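Illustrative sizes for the two helpers (a sketch; fmt import assumed): sov counts 7-bit groups, while soz zigzag-encodes first so small negative magnitudes stay small.
fmt.Println(sovV3Lock(127))               // 1: fits in one 7-bit group
fmt.Println(sovV3Lock(300))               // 2: encoded as 0xAC 0x02
fmt.Println(sozV3Lock(uint64(int64(-1)))) // 1: zigzag maps -1 -> 1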
func (m *LockRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LockRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
}
m.Lease = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Lease |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *LockResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LockResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Header == nil {
m.Header = &etcdserverpb.ResponseHeader{}
}
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Header == nil {
m.Header = &etcdserverpb.ResponseHeader{}
}
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipV3Lock(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthV3Lock
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipV3Lock(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) }
var fileDescriptorV3Lock = []byte{
// 336 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39,
0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2,
0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36,
0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31,
0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0,
0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00,
}

View File

@@ -0,0 +1,65 @@
syntax = "proto3";
package v3lockpb;
import "gogoproto/gogo.proto";
import "etcd/etcdserver/etcdserverpb/rpc.proto";
// for grpc-gateway
import "google/api/annotations.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// The lock service exposes client-side locking facilities as a gRPC interface.
service Lock {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
rpc Lock(LockRequest) returns (LockResponse) {
option (google.api.http) = {
post: "/v3alpha/lock/lock"
body: "*"
};
}
// Unlock takes a key returned by Lock and releases the hold on the lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
rpc Unlock(UnlockRequest) returns (UnlockResponse) {
option (google.api.http) = {
post: "/v3alpha/lock/unlock"
body: "*"
};
}
}
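Given the annotations above, the gateway exposes Lock at POST /v3alpha/lock/lock. A hypothetical direct HTTP call follows (endpoint address assumed); note that bytes fields travel base64-encoded in the JSON mapping, so "my-lock" becomes "bXktbG9jaw==".
resp, err := http.Post(
	"http://127.0.0.1:2379/v3alpha/lock/lock",
	"application/json",
	strings.NewReader(`{"name":"bXktbG9jaw=="}`), // lease omitted for brevity
)
if err != nil {
	// handle error
}
defer resp.Body.Close()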
message LockRequest {
// name is the identifier for the distributed shared lock to be acquired.
bytes name = 1;
// lease is the ID of the lease that will be attached to ownership of the
// lock. If the lease expires or is revoked while it holds the lock,
// the lock is automatically released. Calls to Lock with the same lease will
// be treated as a single acquisition; locking twice with the same lease is a
// no-op.
int64 lease = 2;
}
message LockResponse {
etcdserverpb.ResponseHeader header = 1;
// key is a key that will exist on etcd for the duration that the Lock caller
// owns the lock. Users should not modify this key or the lock may exhibit
// undefined behavior.
bytes key = 2;
}
message UnlockRequest {
// key is the lock ownership key granted by Lock.
bytes key = 1;
}
message UnlockResponse {
etcdserverpb.ResponseHeader header = 1;
}

View File

@@ -16,6 +16,7 @@ package v3rpc
import (
"crypto/tls"
"math"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -24,11 +25,17 @@ import (
"google.golang.org/grpc/grpclog"
)
const (
grpcOverheadBytes = 512 * 1024
maxStreams = math.MaxUint32
maxSendBytes = math.MaxInt32
)
func init() {
grpclog.SetLogger(plog)
}
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil {
@@ -36,8 +43,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
}
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
grpcServer := grpc.NewServer(append(opts, gopts...)...)
grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))

View File

@@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
return nil, rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
@@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
return rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ss.Context())
md, ok := metadata.FromIncomingContext(ss.Context())
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {

View File

@@ -134,6 +134,12 @@ func checkPutRequest(r *pb.PutRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrGRPCEmptyKey
}
if r.IgnoreValue && len(r.Value) != 0 {
return rpctypes.ErrGRPCValueProvided
}
if r.IgnoreLease && r.Lease != 0 {
return rpctypes.ErrGRPCLeaseProvided
}
return nil
}
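Illustrative inputs for the new checks (a sketch; expected return values noted in comments):
checkPutRequest(&pb.PutRequest{Key: []byte("k"), Value: []byte("v")})                    // nil
checkPutRequest(&pb.PutRequest{Key: []byte("k"), IgnoreValue: true})                     // nil; the prior value is reused at apply time
checkPutRequest(&pb.PutRequest{Key: []byte("k"), IgnoreValue: true, Value: []byte("v")}) // ErrGRPCValueProvided
checkPutRequest(&pb.PutRequest{Key: []byte("k"), IgnoreLease: true, Lease: 5})           // ErrGRPCLeaseProvided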
@@ -246,8 +252,8 @@ func checkRequestOp(u *pb.RequestOp) error {
return checkDeleteRequest(uv.RequestDeleteRange)
}
default:
// empty op
return nil
// empty op / nil entry
return rpctypes.ErrGRPCKeyNotFound
}
return nil
}

View File

@@ -18,6 +18,7 @@ import (
"io"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"golang.org/x/net/context"
@@ -53,20 +54,45 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques
func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
resp, err := ls.le.LeaseTimeToLive(ctx, rr)
if err != nil {
if err != nil && err != lease.ErrLeaseNotFound {
return nil, togRPCError(err)
}
if err == lease.ErrLeaseNotFound {
resp = &pb.LeaseTimeToLiveResponse{
Header: &pb.ResponseHeader{},
ID: rr.ID,
TTL: -1,
}
}
ls.hdr.fill(resp.Header)
return resp, nil
}
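With this change a missing lease is reported in-band as TTL -1 rather than as an RPC error; a client-side sketch (the lease client, ctx, and id are assumed context):
ttlResp, err := leaseClient.LeaseTimeToLive(ctx, &pb.LeaseTimeToLiveRequest{ID: id})
if err != nil {
	return err
}
if ttlResp.TTL == -1 {
	// lease already expired or never existed; treat as gone, not as a failure
}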
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
errc := make(chan error, 1)
go func() {
errc <- ls.leaseKeepAlive(stream)
}()
select {
case err = <-errc:
case <-stream.Context().Done():
// the only server-side cancellation is noleader for now.
err = stream.Context().Err()
if err == context.Canceled {
err = rpctypes.ErrGRPCNoLeader
}
}
return err
}
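The wrapper above is a fan-in pattern: run the blocking recv/send loop in a goroutine and race it against the stream context. A generic sketch under assumed names (note the goroutine lingers until the loop itself returns):
func runWithStreamCtx(ctx context.Context, loop func() error) error {
	errc := make(chan error, 1) // buffered so the goroutine never blocks on send
	go func() { errc <- loop() }()
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err() // callers may map context.Canceled to a domain error such as ErrGRPCNoLeader
	}
}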
func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
for {
req, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
return err
}
@@ -92,6 +118,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
resp.TTL = ttl
err = stream.Send(resp)
if err != nil {
plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
return err
}
}

View File

@@ -47,6 +47,7 @@ type RaftStatusGetter interface {
}
type AuthGetter interface {
AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
AuthStore() auth.AuthStore
}
@@ -152,7 +153,7 @@ type authMaintenanceServer struct {
}
func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
if err != nil {
return err
}

View File

@@ -48,21 +48,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
now := time.Now()
m := membership.NewMember("", urls, "", &now)
if err = cs.server.AddMember(ctx, *m); err != nil {
return nil, togRPCError(err)
membs, merr := cs.server.AddMember(ctx, *m)
if merr != nil {
return nil, togRPCError(merr)
}
return &pb.MemberAddResponse{
Header: cs.header(),
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
Header: cs.header(),
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
Members: membersToProtoMembers(membs),
}, nil
}
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
membs, err := cs.server.RemoveMember(ctx, r.ID)
if err != nil {
return nil, togRPCError(err)
}
return &pb.MemberRemoveResponse{Header: cs.header()}, nil
return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
}
func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
@@ -70,15 +73,23 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
ID: types.ID(r.ID),
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
}
if err := cs.server.UpdateMember(ctx, m); err != nil {
membs, err := cs.server.UpdateMember(ctx, m)
if err != nil {
return nil, togRPCError(err)
}
return &pb.MemberUpdateResponse{Header: cs.header()}, nil
return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
}
func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
membs := cs.cluster.Members()
membs := membersToProtoMembers(cs.cluster.Members())
return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
}
func (cs *ClusterServer) header() *pb.ResponseHeader {
return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
}
func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
protoMembs := make([]*pb.Member, len(membs))
for i := range membs {
protoMembs[i] = &pb.Member{
@@ -88,10 +99,5 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest
ClientURLs: membs[i].ClientURLs,
}
}
return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil
}
func (cs *ClusterServer) header() *pb.ResponseHeader {
return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
return protoMembs
}

View File

@@ -12,6 +12,7 @@ go_library(
deps = [
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)

View File

@@ -17,16 +17,20 @@ package rpctypes
import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
// server-side error
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found")
ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided")
ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
@@ -53,6 +57,7 @@ var (
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
@@ -63,7 +68,11 @@ var (
ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
@@ -95,6 +104,7 @@ var (
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
@@ -106,12 +116,15 @@ var (
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
ErrValueProvided = Error(ErrGRPCValueProvided)
ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
@@ -138,6 +151,7 @@ var (
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
@@ -175,3 +189,10 @@ func Error(err error) error {
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
func ErrorDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
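A sketch of matching a server error on the client with the new status-aware helper; the failing call and its client are assumed context.
_, err := kvClient.Put(ctx, &pb.PutRequest{}) // empty key
if rpctypes.ErrorDesc(err) == "etcdserver: key is not provided" {
	// rejected by checkPutRequest on the server
}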

View File

@@ -42,8 +42,6 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCCompacted
case mvcc.ErrFutureRev:
return rpctypes.ErrGRPCFutureRev
case lease.ErrLeaseNotFound:
return rpctypes.ErrGRPCLeaseNotFound
case etcdserver.ErrRequestTooLarge:
return rpctypes.ErrGRPCRequestTooLarge
case etcdserver.ErrNoSpace:
@@ -63,6 +61,8 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCTimeoutDueToConnectionLost
case etcdserver.ErrUnhealthy:
return rpctypes.ErrGRPCUnhealthy
case etcdserver.ErrKeyNotFound:
return rpctypes.ErrGRPCKeyNotFound
case lease.ErrLeaseNotFound:
return rpctypes.ErrGRPCLeaseNotFound
@@ -95,6 +95,8 @@ func togRPCError(err error) error {
return rpctypes.ErrGRPCAuthNotEnabled
case auth.ErrInvalidAuthToken:
return rpctypes.ErrGRPCInvalidAuthToken
case auth.ErrInvalidAuthMgmt:
return rpctypes.ErrGRPCInvalidAuthMgmt
default:
return grpc.Errorf(codes.Unknown, err.Error())
}

View File

@@ -21,6 +21,7 @@ import (
"golang.org/x/net/context"
"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -33,6 +34,8 @@ type watchServer struct {
memberID int64
raftTimer etcdserver.RaftTimer
watchable mvcc.WatchableKV
ag AuthGetter
}
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
@@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
memberID: int64(s.ID()),
raftTimer: s,
watchable: s.Watchable(),
ag: s,
}
}
@@ -101,6 +105,8 @@ type serverWatchStream struct {
// wg waits for the send loop to complete
wg sync.WaitGroup
ag AuthGetter
}
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
@@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
progress: make(map[mvcc.WatchID]bool),
prevKV: make(map[mvcc.WatchID]bool),
closec: make(chan struct{}),
ag: ws.ag,
}
sws.wg.Add(1)
@@ -133,6 +141,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
// deadlock when calling sws.close().
go func() {
if rerr := sws.recvLoop(); rerr != nil {
plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
errc <- rerr
}
}()
@@ -150,6 +159,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
return err
}
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
if err != nil {
return false
}
if authInfo == nil {
// if auth is enabled, IsRangePermitted() can cause an error
authInfo = &auth.AuthInfo{}
}
return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}
func (sws *serverWatchStream) recvLoop() error {
for {
req, err := sws.gRPCStream.Recv()
@@ -171,10 +193,32 @@ func (sws *serverWatchStream) recvLoop() error {
// \x00 is the smallest key
creq.Key = []byte{0}
}
if len(creq.RangeEnd) == 0 {
// force nil since watchstream.Watch distinguishes
// between nil and []byte{} for single key / >=
creq.RangeEnd = nil
}
if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
// support >= key queries
creq.RangeEnd = []byte{}
}
if !sws.isWatchPermitted(creq) {
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: -1,
Canceled: true,
Created: true,
CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
}
select {
case sws.ctrlStream <- wr:
case <-sws.closec:
}
return nil
}
filters := FiltersFromRequest(creq)
wsrev := sws.watchStream.Rev()
@@ -294,6 +338,7 @@ func (sws *serverWatchStream) sendLoop() {
mvcc.ReportEventReceived(len(evs))
if err := sws.gRPCStream.Send(wr); err != nil {
plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
return
}
@@ -310,6 +355,7 @@ func (sws *serverWatchStream) sendLoop() {
}
if err := sws.gRPCStream.Send(c); err != nil {
plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
return
}
@@ -325,6 +371,7 @@ func (sws *serverWatchStream) sendLoop() {
for _, v := range pending[wid] {
mvcc.ReportEventReceived(len(v.Events))
if err := sws.gRPCStream.Send(v); err != nil {
plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
return
}
}
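For reference, the RangeEnd conventions the receive loop above normalizes, shown as illustrative watch requests:
reqs := []*pb.WatchCreateRequest{
	{Key: []byte("foo")},                          // single key "foo" (RangeEnd forced to nil)
	{Key: []byte("foo"), RangeEnd: []byte("fop")}, // half-open range ["foo", "fop")
	{Key: []byte("foo"), RangeEnd: []byte{0}},     // all keys >= "foo"
	{Key: []byte{0}, RangeEnd: []byte{0}},         // all keys (\x00 is the smallest key)
}
_ = reqs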

View File

@@ -16,7 +16,6 @@ package etcdserver
import (
"bytes"
"fmt"
"sort"
"time"
@@ -30,11 +29,6 @@ import (
)
const (
// noTxn is an invalid txn ID.
// To apply with independent Range, Put, Delete, you can pass noTxn
// to apply functions instead of a valid txn ID.
noTxn = -1
warnApplyDuration = 100 * time.Millisecond
)
@@ -51,9 +45,9 @@ type applyResult struct {
type applierV3 interface {
Apply(r *pb.InternalRaftRequest) *applyResult
Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error)
Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error)
DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
@@ -99,11 +93,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
switch {
case r.Range != nil:
ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range)
ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
case r.Put != nil:
ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put)
ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
case r.DeleteRange != nil:
ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange)
ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
case r.Txn != nil:
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
case r.Compaction != nil:
@@ -152,106 +146,87 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
return ar
}
func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
resp := &pb.PutResponse{}
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
resp = &pb.PutResponse{}
resp.Header = &pb.ResponseHeader{}
var (
rev int64
err error
)
var rr *mvcc.RangeResult
if p.PrevKv {
if txnID != noTxn {
rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
} else {
rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
}
}
if txnID != noTxn {
rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease))
if err != nil {
return nil, err
}
} else {
leaseID := lease.LeaseID(p.Lease)
val, leaseID := p.Value, lease.LeaseID(p.Lease)
if txn == nil {
if leaseID != lease.NoLease {
if l := a.s.lessor.Lookup(leaseID); l == nil {
return nil, lease.ErrLeaseNotFound
}
}
rev = a.s.KV().Put(p.Key, p.Value, leaseID)
txn = a.s.KV().Write()
defer txn.End()
}
resp.Header.Revision = rev
if rr != nil && len(rr.KVs) != 0 {
resp.PrevKv = &rr.KVs[0]
var rr *mvcc.RangeResult
if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
}
if p.IgnoreValue || p.IgnoreLease {
if rr == nil || len(rr.KVs) == 0 {
// the ignore_{lease,value} flags expect a previous key-value pair
return nil, ErrKeyNotFound
}
}
if p.IgnoreValue {
val = rr.KVs[0].Value
}
if p.IgnoreLease {
leaseID = lease.LeaseID(rr.KVs[0].Lease)
}
if p.PrevKv {
if rr != nil && len(rr.KVs) != 0 {
resp.PrevKv = &rr.KVs[0]
}
}
resp.Header.Revision = txn.Put(p.Key, val, leaseID)
return resp, nil
}
func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
resp := &pb.DeleteRangeResponse{}
resp.Header = &pb.ResponseHeader{}
var (
n int64
rev int64
err error
)
if txn == nil {
txn = a.s.kv.Write()
defer txn.End()
}
if isGteRange(dr.RangeEnd) {
dr.RangeEnd = []byte{}
}
var rr *mvcc.RangeResult
if dr.PrevKv {
if txnID != noTxn {
rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
} else {
rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
}
}
if txnID != noTxn {
n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd)
rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
if err != nil {
return nil, err
}
} else {
n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd)
}
resp.Deleted = n
if rr != nil {
for i := range rr.KVs {
resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
if rr != nil {
for i := range rr.KVs {
resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
}
}
}
resp.Header.Revision = rev
resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd)
return resp, nil
}
func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
resp := &pb.RangeResponse{}
resp.Header = &pb.ResponseHeader{}
var (
rr *mvcc.RangeResult
err error
)
if txn == nil {
txn = a.s.kv.Read()
defer txn.End()
}
if isGteRange(r.RangeEnd) {
r.RangeEnd = []byte{}
@@ -275,16 +250,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
Count: r.CountOnly,
}
if txnID != noTxn {
rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro)
if err != nil {
return nil, err
}
} else {
rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro)
if err != nil {
return nil, err
}
rr, err := txn.Range(r.Key, r.RangeEnd, ro)
if err != nil {
return nil, err
}
if r.MaxModRevision != 0 {
@@ -350,61 +318,64 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
}
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
ok := true
for _, c := range rt.Compare {
if _, ok = a.applyCompare(c); !ok {
break
isWrite := !isTxnReadonly(rt)
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
reqs, ok := a.compareToOps(txn, rt)
if isWrite {
if err := a.checkRequestPut(txn, reqs); err != nil {
txn.End()
return nil, err
}
}
var reqs []*pb.RequestOp
if ok {
reqs = rt.Success
} else {
reqs = rt.Failure
}
if err := a.checkRequestLeases(reqs); err != nil {
if err := checkRequestRange(txn, reqs); err != nil {
txn.End()
return nil, err
}
if err := a.checkRequestRange(reqs); err != nil {
return nil, err
}
// When executing the operations of txn, we need to hold the txn lock.
// So the reader will not see any intermediate results.
txnID := a.s.KV().TxnBegin()
resps := make([]*pb.ResponseOp, len(reqs))
txnResp := &pb.TxnResponse{
Responses: resps,
Succeeded: ok,
Header: &pb.ResponseHeader{},
}
// When executing mutable txn ops, etcd must hold the txn lock so
// readers do not see any intermediate results. Since writes are
// serialized on the raft loop, the revision in the read view will
// be the revision of the write txn.
if isWrite {
txn.End()
txn = a.s.KV().Write()
}
for i := range reqs {
resps[i] = a.applyUnion(txnID, reqs[i])
resps[i] = a.applyUnion(txn, reqs[i])
}
err := a.s.KV().TxnEnd(txnID)
if err != nil {
panic(fmt.Sprint("unexpected error when closing txn", txnID))
rev := txn.Rev()
if len(txn.Changes()) != 0 {
rev++
}
txn.End()
txnResp := &pb.TxnResponse{}
txnResp.Header = &pb.ResponseHeader{}
txnResp.Header.Revision = a.s.KV().Rev()
txnResp.Responses = resps
txnResp.Succeeded = ok
txnResp.Header.Revision = rev
return txnResp, nil
}
// applyCompare applies the compare request.
// It returns the revision at which the comparison happens. If the comparison
// succeeds, it returns true. Otherwise it returns false.
func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{})
rev := rr.Rev
if err != nil {
if err == mvcc.ErrTxnIDMismatch {
panic("unexpected txn ID mismatch error")
func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
for _, c := range rt.Compare {
if !applyCompare(rv, c) {
return rt.Failure, false
}
return rev, false
}
return rt.Success, true
}
// applyCompare applies the compare request.
// If the comparison succeeds, it returns true. Otherwise, returns false.
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
if err != nil {
return false
}
var ckv mvccpb.KeyValue
if len(rr.KVs) != 0 {
@@ -416,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
// We can treat non-existence as the empty set explicitly, such that
// even a key with a value of length 0 bytes is still a real key
// that was written that way
return rev, false
return false
}
}
@@ -448,30 +419,22 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
switch c.Result {
case pb.Compare_EQUAL:
if result != 0 {
return rev, false
}
return result == 0
case pb.Compare_NOT_EQUAL:
if result == 0 {
return rev, false
}
return result != 0
case pb.Compare_GREATER:
if result != 1 {
return rev, false
}
return result > 0
case pb.Compare_LESS:
if result != -1 {
return rev, false
}
return result < 0
}
return rev, true
return true
}
func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp {
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
switch tv := union.Request.(type) {
case *pb.RequestOp_RequestRange:
if tv.RequestRange != nil {
resp, err := a.Range(txnID, tv.RequestRange)
resp, err := a.Range(txn, tv.RequestRange)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
}
@@ -479,7 +442,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
}
case *pb.RequestOp_RequestPut:
if tv.RequestPut != nil {
resp, err := a.Put(txnID, tv.RequestPut)
resp, err := a.Put(txn, tv.RequestPut)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
}
@@ -487,7 +450,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
}
case *pb.RequestOp_RequestDeleteRange:
if tv.RequestDeleteRange != nil {
resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
}
@@ -588,7 +551,7 @@ type applierV3Capped struct {
// with Puts so that the number of keys in the store is capped.
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
return nil, ErrNoSpace
}
@@ -617,7 +580,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
}
func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
if resp != nil {
resp.Header = newHeader(a.s)
@@ -738,9 +701,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
return &quotaApplierV3{app, NewBackendQuota(s)}
}
func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
ok := a.q.Available(p)
resp, err := a.applierV3.Put(txnID, p)
resp, err := a.applierV3.Put(txn, p)
if err == nil && !ok {
err = ErrNoSpace
}
@@ -804,14 +767,27 @@ func (s *kvSortByValue) Less(i, j int) bool {
return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}
func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
if !ok {
continue
}
preq := tv.RequestPut
if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease {
if preq == nil {
continue
}
if preq.IgnoreValue || preq.IgnoreLease {
// expects a previous key-value pair; error if it does not exist
rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
if err != nil {
return err
}
if rr == nil || len(rr.KVs) == 0 {
return ErrKeyNotFound
}
}
if lease.LeaseID(preq.Lease) == lease.NoLease {
continue
}
if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
@@ -821,7 +797,7 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
return nil
}
func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
if !ok {
@@ -832,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
continue
}
if greq.Revision > a.s.KV().Rev() {
if greq.Revision > rv.Rev() {
return mvcc.ErrFutureRev
}
if greq.Revision < a.s.KV().FirstRev() {
if greq.Revision < rv.FirstRev() {
return mvcc.ErrCompacted
}
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc"
)
type authApplierV3 struct {
@@ -58,7 +59,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
return ret
}
func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
return nil, err
}
@@ -68,17 +69,17 @@ func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, er
return nil, err
}
}
return aa.applierV3.Put(txnID, r)
return aa.applierV3.Put(txn, r)
}
func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
return nil, err
}
return aa.applierV3.Range(txnID, r)
return aa.applierV3.Range(txn, r)
}
func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
return nil, err
}
@@ -89,7 +90,7 @@ func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb
}
}
return aa.applierV3.DeleteRange(txnID, r)
return aa.applierV3.DeleteRange(txn, r)
}
func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {

81
vendor/github.com/coreos/etcd/etcdserver/backend.go generated vendored Normal file
View File

@@ -0,0 +1,81 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"fmt"
"os"
"time"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
)
func newBackend(cfg *ServerConfig) backend.Backend {
bcfg := backend.DefaultBackendConfig()
bcfg.Path = cfg.backendPath()
if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
// permit 10% excess over the quota so the space alarm can still be disarmed
bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
}
return backend.New(bcfg)
}
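The sizing rule in newBackend maps the mmap region to the quota plus a tenth, leaving room to write the disarm record once the quota is exceeded. A tiny worked example with a hypothetical 8 GiB quota:

package main

import "fmt"

func main() {
	quota := int64(8 << 30)          // hypothetical QuotaBackendBytes: 8 GiB
	mmap := uint64(quota + quota/10) // same formula as newBackend: quota + 10% headroom
	fmt.Printf("mmap size: %d bytes (~8.8 GiB)\n", mmap)
}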
// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
if err != nil {
return nil, fmt.Errorf("database snapshot file path error: %v", err)
}
if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
return nil, fmt.Errorf("rename snapshot file error: %v", err)
}
return openBackend(cfg), nil
}
// openBackend returns a backend using the current etcd db.
func openBackend(cfg *ServerConfig) backend.Backend {
fn := cfg.backendPath()
beOpened := make(chan backend.Backend)
go func() {
beOpened <- newBackend(cfg)
}()
select {
case be := <-beOpened:
return be
case <-time.After(time.Second):
plog.Warningf("another etcd process is using %q and holds the file lock.", fn)
plog.Warningf("waiting for it to exit before starting...")
}
return <-beOpened
}
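openBackend's select is a small reusable pattern: run the blocking open in a goroutine, race it against a timer, warn once, then keep waiting on the same channel. A generic sketch of the same shape; slowOpen is a stand-in, not an etcd API:

package main

import (
	"fmt"
	"time"
)

func slowOpen() string { // stand-in for an open that may block on a file lock
	time.Sleep(3 * time.Second)
	return "backend"
}

func main() {
	opened := make(chan string)
	go func() { opened <- slowOpen() }()

	select {
	case be := <-opened:
		fmt.Println("opened immediately:", be)
		return
	case <-time.After(time.Second):
		fmt.Println("still waiting; another process may hold the lock")
	}
	fmt.Println("opened eventually:", <-opened) // resumes the same wait
}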
// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
// before updating the backend db after persisting the raft snapshot to disk,
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
// case, replace the db with the snapshot db sent by the leader.
func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
var cIndex consistentIndex
kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
defer kv.Close()
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
return oldbe, nil
}
oldbe.Close()
return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
}
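recoverSnapshotBackend reduces to one comparison: keep the current db when its consistent index already covers the snapshot, otherwise swap in the snapshot db. A sketch of just that decision; the names and return strings are illustrative:

package main

import "fmt"

func chooseBackend(snapshotIndex, consistentIndex uint64) string {
	if snapshotIndex <= consistentIndex {
		return "keep current db" // backend already reflects the snapshot
	}
	return "replace with snapshot db" // crashed before the backend caught up
}

func main() {
	fmt.Println(chooseBackend(5, 7)) // keep current db
	fmt.Println(chooseBackend(9, 7)) // replace with snapshot db
}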

View File

@@ -23,7 +23,6 @@ import (
"time"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/httputil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
@@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions,
plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
continue
}
// etcd 2.0 does not have a version endpoint on the peer URL.
if resp.StatusCode == http.StatusNotFound {
httputil.GracefulClose(resp)
return &version.Versions{
Server: "2.0.0",
Cluster: "2.0.0",
}, nil
}
var b []byte
b, err = ioutil.ReadAll(resp.Body)
resp.Body.Close()

View File

@@ -55,10 +55,15 @@ type ServerConfig struct {
AutoCompactionRetention int
QuotaBackendBytes int64
// MaxRequestBytes is the maximum request size to send over raft.
MaxRequestBytes uint
StrictReconfigCheck bool
// ClientCertAuthEnabled is true when the client cert has been signed by the client CA.
ClientCertAuthEnabled bool
AuthToken string
}
// VerifyBootstrap sanity-checks the initial config for the bootstrap case
@@ -198,3 +203,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration {
}
return time.Second
}
func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
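The new fields group request sizing, client-cert auth, and the token backend on ServerConfig. An illustrative literal showing only the fields touched by this diff; the values are hypothetical, and a real config needs many more fields (name, URLs, directories):

package main

import "github.com/coreos/etcd/etcdserver"

func main() {
	cfg := etcdserver.ServerConfig{
		MaxRequestBytes:       2 * 1024 * 1024, // hypothetical 2 MiB cap on requests sent over raft
		QuotaBackendBytes:     8 << 30,         // hypothetical 8 GiB backend quota
		ClientCertAuthEnabled: true,            // client certs are signed by the client CA
		AuthToken:             "simple",        // token backend; "jwt" is the alternative wired up in this release
	}
	_ = cfg
}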

View File

@@ -33,6 +33,7 @@ var (
ErrNoSpace = errors.New("etcdserver: no space")
ErrTooManyRequests = errors.New("etcdserver: too many requests")
ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
ErrKeyNotFound = errors.New("etcdserver: key not found")
)
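ErrKeyNotFound is a sentinel error, so callers compare it directly rather than matching strings. A minimal sketch; the error value is assigned directly only to show the comparison:

package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver"
)

func main() {
	var err error = etcdserver.ErrKeyNotFound // stand-in for an error returned by apply
	if err == etcdserver.ErrKeyNotFound {
		fmt.Println("ignore-value/ignore-lease put hit a missing key")
	}
}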
type DiscoveryError struct {

Some files were not shown because too many files have changed in this diff.