Level sets dependency graph to consume etcd 3.1.5

vendor/github.com/coreos/etcd/etcdserver/api/capability.go: 5 changes (generated, vendored)

@@ -30,15 +30,14 @@ const (
 )
 
 var (
-    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
+    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
 
     // capabilityMaps is a static map of version to capability map.
     // the base capabilities is the set of capability 2.0 supports.
     capabilityMaps = map[string]map[Capability]bool{
         "2.1.0": {AuthCapability: true},
         "2.2.0": {AuthCapability: true},
         "2.3.0": {AuthCapability: true},
         "3.0.0": {AuthCapability: true, V3rpcCapability: true},
+        "3.1.0": {AuthCapability: true, V3rpcCapability: true},
     }
 
     enableMapMu sync.RWMutex
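
The capability table above gates client-visible features on the cluster's minor version; the "3.1.0" entry is what enables auth and v3rpc once the whole cluster runs 3.1. A minimal, self-contained sketch of consulting such a table (the serverSupports helper is illustrative, not part of the vendored code):

package main

import "fmt"

type Capability string

const (
    AuthCapability  Capability = "auth"
    V3rpcCapability Capability = "v3rpc"
)

var capabilityMaps = map[string]map[Capability]bool{
    "3.0.0": {AuthCapability: true, V3rpcCapability: true},
    "3.1.0": {AuthCapability: true, V3rpcCapability: true},
}

// serverSupports is a hypothetical helper: it reports whether the given
// minor cluster version enables a capability, defaulting to false for
// unknown versions.
func serverSupports(version string, c Capability) bool {
    caps, ok := capabilityMaps[version]
    return ok && caps[c]
}

func main() {
    fmt.Println(serverSupports("3.1.0", V3rpcCapability)) // true
    fmt.Println(serverSupports("2.3.0", V3rpcCapability)) // false
}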

vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go: 104 changes (generated, vendored)

@@ -21,7 +21,6 @@ import (
     "fmt"
     "io/ioutil"
     "net/http"
-    "net/http/pprof"
     "net/url"
     "path"
     "strconv"

@@ -57,7 +56,6 @@ const (
     healthPath  = "/health"
     versionPath = "/version"
     configPath  = "/config"
-    pprofPrefix = "/debug/pprof"
 )
 
 // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.

@@ -113,23 +111,6 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
     mux.Handle(deprecatedMachinesPrefix, dmh)
     handleAuth(mux, sech)
 
-    if server.IsPprofEnabled() {
-        plog.Infof("pprof is enabled under %s", pprofPrefix)
-
-        mux.HandleFunc(pprofPrefix, pprof.Index)
-        mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
-        mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
-        mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
-        // TODO: currently, we don't create an entry for pprof.Trace,
-        // because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
-        // we should add the entry.
-
-        mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
-        mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
-        mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
-        mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
-    }
-
     return requestLogger(mux)
 }

@@ -153,7 +134,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     defer cancel()
     clock := clockwork.NewRealClock()
     startTime := clock.Now()
-    rr, err := parseKeyRequest(r, clock)
+    rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
     if err != nil {
         writeKeyError(w, err)
         return

@@ -175,7 +156,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     }
     switch {
     case resp.Event != nil:
-        if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
+        if err := writeKeyEvent(w, resp.Event, noValueOnSuccess, h.timer); err != nil {
             // Should never be reached
             plog.Errorf("error writing event (%v)", err)
         }

@@ -365,32 +346,23 @@ func serveVars(w http.ResponseWriter, r *http.Request) {
     fmt.Fprintf(w, "\n}\n")
 }
 
 // TODO: change etcdserver to raft interface when we have it.
 // add test for healthHandler when we have the interface ready.
 func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
         if !allowMethod(w, r.Method, "GET") {
             return
         }
 
         if uint64(server.Leader()) == raft.None {
             http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
             return
         }
 
-        // wait for raft's progress
-        index := server.Index()
-        for i := 0; i < 3; i++ {
-            time.Sleep(250 * time.Millisecond)
-            if server.Index() > index {
-                w.WriteHeader(http.StatusOK)
-                w.Write([]byte(`{"health": "true"}`))
-                return
-            }
-        }
+        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+        defer cancel()
+        if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
+            http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
+            return
+        }
 
-        http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
-        return
+        w.WriteHeader(http.StatusOK)
+        w.Write([]byte(`{"health": "true"}`))
     }
 }

@@ -449,19 +421,20 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) {
 // parseKeyRequest converts a received http.Request on keysPrefix to
 // a server Request, performing validation of supplied fields as appropriate.
 // If any validation fails, an empty Request and non-nil error is returned.
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, error) {
+func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
+    noValueOnSuccess := false
     emptyReq := etcdserverpb.Request{}
 
     err := r.ParseForm()
     if err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidForm,
             err.Error(),
         )
     }
 
     if !strings.HasPrefix(r.URL.Path, keysPrefix) {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidForm,
             "incorrect key prefix",
         )

@@ -470,13 +443,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 
     var pIdx, wIdx uint64
     if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeIndexNaN,
             `invalid value for "prevIndex"`,
         )
     }
     if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeIndexNaN,
             `invalid value for "waitIndex"`,
         )

@@ -484,45 +457,45 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 
     var rec, sort, wait, dir, quorum, stream bool
     if rec, err = getBool(r.Form, "recursive"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "recursive"`,
         )
     }
     if sort, err = getBool(r.Form, "sorted"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "sorted"`,
         )
     }
     if wait, err = getBool(r.Form, "wait"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "wait"`,
         )
     }
     // TODO(jonboulle): define what parameters dir is/isn't compatible with?
     if dir, err = getBool(r.Form, "dir"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "dir"`,
         )
     }
     if quorum, err = getBool(r.Form, "quorum"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "quorum"`,
         )
     }
     if stream, err = getBool(r.Form, "stream"); err != nil {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `invalid value for "stream"`,
         )
     }
 
     if wait && r.Method != "GET" {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodeInvalidField,
             `"wait" can only be used with GET requests`,
         )

@@ -530,19 +503,26 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
 
     pV := r.FormValue("prevValue")
     if _, ok := r.Form["prevValue"]; ok && pV == "" {
-        return emptyReq, etcdErr.NewRequestError(
+        return emptyReq, false, etcdErr.NewRequestError(
             etcdErr.EcodePrevValueRequired,
             `"prevValue" cannot be empty`,
         )
     }
 
+    if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
+        return emptyReq, false, etcdErr.NewRequestError(
+            etcdErr.EcodeInvalidField,
+            `invalid value for "noValueOnSuccess"`,
+        )
+    }
+
     // TTL is nullable, so leave it null if not specified
     // or an empty string
     var ttl *uint64
     if len(r.FormValue("ttl")) > 0 {
         i, err := getUint64(r.Form, "ttl")
         if err != nil {
-            return emptyReq, etcdErr.NewRequestError(
+            return emptyReq, false, etcdErr.NewRequestError(
                 etcdErr.EcodeTTLNaN,
                 `invalid value for "ttl"`,
             )

@@ -555,7 +535,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
     if _, ok := r.Form["prevExist"]; ok {
         bv, err := getBool(r.Form, "prevExist")
         if err != nil {
-            return emptyReq, etcdErr.NewRequestError(
+            return emptyReq, false, etcdErr.NewRequestError(
                 etcdErr.EcodeInvalidField,
                 "invalid value for prevExist",
             )

@@ -568,7 +548,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
     if _, ok := r.Form["refresh"]; ok {
         bv, err := getBool(r.Form, "refresh")
         if err != nil {
-            return emptyReq, etcdErr.NewRequestError(
+            return emptyReq, false, etcdErr.NewRequestError(
                 etcdErr.EcodeInvalidField,
                 "invalid value for refresh",
             )

@@ -577,13 +557,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
     if refresh != nil && *refresh {
         val := r.FormValue("value")
         if _, ok := r.Form["value"]; ok && val != "" {
-            return emptyReq, etcdErr.NewRequestError(
+            return emptyReq, false, etcdErr.NewRequestError(
                 etcdErr.EcodeRefreshValue,
                 `A value was provided on a refresh`,
             )
         }
         if ttl == nil {
-            return emptyReq, etcdErr.NewRequestError(
+            return emptyReq, false, etcdErr.NewRequestError(
                 etcdErr.EcodeRefreshTTLRequired,
                 `No TTL value set`,
             )

@@ -621,13 +601,13 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque
         rr.Expiration = clock.Now().Add(expr).UnixNano()
     }
 
-    return rr, nil
+    return rr, noValueOnSuccess, nil
 }
 
 // writeKeyEvent trims the prefix of key path in a single Event under
 // StoreKeysPrefix, serializes it and writes the resulting JSON to the given
 // ResponseWriter, along with the appropriate headers.
-func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTimer) error {
+func writeKeyEvent(w http.ResponseWriter, ev *store.Event, noValueOnSuccess bool, rt etcdserver.RaftTimer) error {
     if ev == nil {
         return errors.New("cannot write empty Event!")
     }

@@ -641,6 +621,12 @@ func writeKeyEvent(w http.ResponseWriter, ev *store.Event, rt etcdserver.RaftTim
     }
 
     ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+    if noValueOnSuccess &&
+        (ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
+            ev.Action == store.Create || ev.Action == store.Update) {
+        ev.Node = nil
+        ev.PrevNode = nil
+    }
     return json.NewEncoder(w).Encode(ev)
 }

@@ -747,6 +733,10 @@ func trimErrorPrefix(err error, prefix string) error {
 
 func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
     ctype := r.Header.Get("Content-Type")
+    semicolonPosition := strings.Index(ctype, ";")
+    if semicolonPosition != -1 {
+        ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
+    }
     if ctype != "application/json" {
         writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
         return false
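
The noValueOnSuccess parameter introduced above is threaded from parseKeyRequest through writeKeyEvent so a successful v2 write can omit the echoed node and prevNode payloads. A hedged sketch of exercising it over the v2 HTTP API; the local endpoint and key name are assumptions for illustration:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"
)

func main() {
    // PUT a key and ask the server to drop node/prevNode from the
    // success response; the action field is still reported.
    form := url.Values{"value": {"bar"}}
    req, err := http.NewRequest("PUT",
        "http://127.0.0.1:2379/v2/keys/foo?noValueOnSuccess=true",
        strings.NewReader(form.Encode()))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(string(body))
}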

vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go: 4 changes (generated, vendored)

@@ -35,7 +35,7 @@ const (
 )
 
 var (
-    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v2http")
+    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
     mlog = logutil.NewMergeLogger(plog)
 )
 
@@ -60,7 +60,7 @@ func writeError(w http.ResponseWriter, r *http.Request, err error) {
     }
     default:
         switch err {
-        case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers:
+        case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
             mlog.MergeError(err)
         default:
             mlog.MergeErrorf("got unexpected response error (%v)", err)

vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go: 2 changes (generated, vendored)

@@ -22,7 +22,7 @@ import (
 )
 
 var (
-    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api/v2http", "httptypes")
+    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
 )
 
 type HTTPError struct {

vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go: 9 changes (generated, vendored)

@@ -26,14 +26,14 @@ import (
 
 const (
     peerMembersPrefix = "/members"
-    leasesPrefix      = "/leases"
 )
 
 // NewPeerHandler generates an http.Handler to handle etcd peer requests.
 func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
     var lh http.Handler
-    if l := s.Lessor(); l != nil {
-        lh = leasehttp.NewHandler(l)
+    l := s.Lessor()
+    if l != nil {
+        lh = leasehttp.NewHandler(l, func() <-chan struct{} { return s.ApplyWait() })
     }
     return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
 }

@@ -49,7 +49,8 @@ func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler
     mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
     mux.Handle(peerMembersPrefix, mh)
     if leaseHandler != nil {
-        mux.Handle(leasesPrefix, leaseHandler)
+        mux.Handle(leasehttp.LeasePrefix, leaseHandler)
+        mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
     }
     mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
     return mux
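
leasehttp.NewHandler now receives a func() <-chan struct{} so the lease handler can wait for the server to finish applying earlier entries without depending on the concrete server type. A minimal sketch of that wait-channel closure pattern (the handler type and names here are illustrative):

package main

import (
    "fmt"
    "time"
)

// handler takes its readiness signal as a closure rather than holding a
// reference to the whole server.
type handler struct {
    waitc func() <-chan struct{}
}

func (h *handler) serve() {
    select {
    case <-h.waitc(): // proceed once prior entries are applied
        fmt.Println("serving request")
    case <-time.After(time.Second):
        fmt.Println("timed out waiting for apply")
    }
}

func main() {
    applied := make(chan struct{})
    close(applied) // pretend the server has already caught up
    h := &handler{waitc: func() <-chan struct{} { return applied }}
    h.serve()
}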

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go: 3 changes (generated, vendored)

@@ -19,14 +19,13 @@ import (
 
     "github.com/coreos/etcd/etcdserver"
     pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-    "github.com/coreos/pkg/capnslog"
     "google.golang.org/grpc"
     "google.golang.org/grpc/credentials"
     "google.golang.org/grpc/grpclog"
 )
 
 func init() {
-    grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
+    grpclog.SetLogger(plog)
 }
 
 func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go: 42 changes (generated, vendored)

@@ -15,7 +15,6 @@
 package v3rpc
 
 import (
-    "strings"
     "sync"
     "time"
 
@@ -25,6 +24,7 @@ import (
     "github.com/coreos/etcd/pkg/types"
     "github.com/coreos/etcd/raft"
 
+    prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
     "golang.org/x/net/context"
     "google.golang.org/grpc"
     "google.golang.org/grpc/metadata"

@@ -53,7 +53,8 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
                 }
             }
         }
-        return metricsUnaryInterceptor(ctx, req, info, handler)
+
+        return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
     }
 }

@@ -88,44 +89,11 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
             }
         }
-        return metricsStreamInterceptor(srv, ss, info, handler)
+
+        return prometheus.StreamServerInterceptor(srv, ss, info, handler)
     }
 }
 
-func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
-    service, method := splitMethodName(info.FullMethod)
-    receivedCounter.WithLabelValues(service, method).Inc()
-
-    start := time.Now()
-    resp, err = handler(ctx, req)
-    if err != nil {
-        failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
-    }
-    handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())
-
-    return resp, err
-}
-
-func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-    service, method := splitMethodName(info.FullMethod)
-    receivedCounter.WithLabelValues(service, method).Inc()
-
-    err := handler(srv, ss)
-    if err != nil {
-        failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
-    }
-
-    return err
-}
-
-func splitMethodName(fullMethodName string) (string, string) {
-    fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
-    if i := strings.Index(fullMethodName, "/"); i >= 0 {
-        return fullMethodName[:i], fullMethodName[i+1:]
-    }
-    return "unknown", "unknown"
-}
-
 type serverStreamWithCtx struct {
     grpc.ServerStream
     ctx context.Context
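
The hand-rolled metrics interceptors above are replaced by go-grpc-prometheus, which counts and times every RPC for free. A hedged sketch of wiring those library interceptors into a gRPC server (service registration is elided):

package main

import (
    "net"

    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    "google.golang.org/grpc"
)

func main() {
    // The library's interceptors record request counts and latencies
    // into the default Prometheus registry.
    srv := grpc.NewServer(
        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
    )
    // register services on srv here, then serve:
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    _ = srv.Serve(lis)
}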

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go: 10 changes (generated, vendored)

@@ -26,7 +26,7 @@ import (
 )
 
 var (
-    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver/api", "v3rpc")
+    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
 
     // Max operations per txn list. For example, Txn.Success can have at most 128 operations,
     // and Txn.Failure can have at most 128 operations.

@@ -56,7 +56,7 @@ func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResp
         plog.Panic("unexpected nil resp.Header")
     }
     s.hdr.fill(resp.Header)
-    return resp, err
+    return resp, nil
 }
 
 func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {

@@ -73,7 +73,7 @@ func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse,
         plog.Panic("unexpected nil resp.Header")
     }
     s.hdr.fill(resp.Header)
-    return resp, err
+    return resp, nil
 }
 
 func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {

@@ -90,7 +90,7 @@ func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*
         plog.Panic("unexpected nil resp.Header")
     }
     s.hdr.fill(resp.Header)
-    return resp, err
+    return resp, nil
 }
 
 func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {

@@ -107,7 +107,7 @@ func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse,
         plog.Panic("unexpected nil resp.Header")
     }
     s.hdr.fill(resp.Header)
-    return resp, err
+    return resp, nil
 }
 
 func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go: 20 changes (generated, vendored)

@@ -18,7 +18,6 @@ import (
     "io"
 
     "github.com/coreos/etcd/etcdserver"
-    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
     pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
     "github.com/coreos/etcd/lease"
     "golang.org/x/net/context"

@@ -35,20 +34,27 @@ func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
 
 func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
     resp, err := ls.le.LeaseGrant(ctx, cr)
-    if err == lease.ErrLeaseExists {
-        return nil, rpctypes.ErrGRPCLeaseExist
-    }
-
     if err != nil {
-        return nil, err
+        return nil, togRPCError(err)
     }
     ls.hdr.fill(resp.Header)
-    return resp, err
+    return resp, nil
 }
 
 func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
     resp, err := ls.le.LeaseRevoke(ctx, rr)
     if err != nil {
-        return nil, rpctypes.ErrGRPCLeaseNotFound
+        return nil, togRPCError(err)
     }
     ls.hdr.fill(resp.Header)
     return resp, nil
 }
 
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+    resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+    if err != nil {
+        return nil, togRPCError(err)
+    }
+    ls.hdr.fill(resp.Header)
+    return resp, nil
+}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go: 54 changes (generated, vendored)

@@ -18,6 +18,7 @@ import (
     "crypto/sha256"
     "io"
 
+    "github.com/coreos/etcd/auth"
     "github.com/coreos/etcd/etcdserver"
     pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
     "github.com/coreos/etcd/mvcc"

@@ -45,6 +46,10 @@ type RaftStatusGetter interface {
     Leader() types.ID
 }
 
+type AuthGetter interface {
+    AuthStore() auth.AuthStore
+}
+
 type maintenanceServer struct {
     rg RaftStatusGetter
     kg KVGetter

@@ -54,7 +59,8 @@ type maintenanceServer struct {
 }
 
 func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
-    return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
+    srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
+    return &authMaintenanceServer{srv, s}
 }
 
 func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {

@@ -139,3 +145,49 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
     ms.hdr.fill(resp.Header)
     return resp, nil
 }
+
+type authMaintenanceServer struct {
+    *maintenanceServer
+    ag AuthGetter
+}
+
+func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
+    authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
+    if err != nil {
+        return err
+    }
+
+    return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+    if err := ams.isAuthenticated(ctx); err != nil {
+        return nil, err
+    }
+
+    return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+    if err := ams.isAuthenticated(srv.Context()); err != nil {
+        return err
+    }
+
+    return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+    if err := ams.isAuthenticated(ctx); err != nil {
+        return nil, err
+    }
+
+    return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+    if err := ams.isAuthenticated(ctx); err != nil {
+        return nil, err
+    }
+
+    return ams.maintenanceServer.Status(ctx, ar)
+}
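
authMaintenanceServer wraps the plain server through struct embedding, so every maintenance RPC gets an admin-permission check before delegating to the embedded implementation. A minimal sketch of that embed-and-guard decorator pattern (types and names here are illustrative):

package main

import (
    "errors"
    "fmt"
)

type server struct{}

func (s *server) Defragment() string { return "defragmented" }

// authServer embeds server and guards each call; embedding keeps any
// methods it does not override available unchanged.
type authServer struct {
    *server
    admin bool
}

func (a *authServer) isAuthenticated() error {
    if !a.admin {
        return errors.New("permission denied")
    }
    return nil
}

func (a *authServer) Defragment() (string, error) {
    if err := a.isAuthenticated(); err != nil {
        return "", err
    }
    return a.server.Defragment(), nil
}

func main() {
    s := &authServer{&server{}, true}
    fmt.Println(s.Defragment()) // defragmented <nil>
}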

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go: 34 changes (generated, vendored)

@@ -24,8 +24,6 @@ import (
     "github.com/coreos/etcd/etcdserver/membership"
     "github.com/coreos/etcd/pkg/types"
     "golang.org/x/net/context"
-    "google.golang.org/grpc"
-    "google.golang.org/grpc/codes"
 )
 
 type ClusterServer struct {

@@ -50,14 +48,8 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
 
     now := time.Now()
     m := membership.NewMember("", urls, "", &now)
-    err = cs.server.AddMember(ctx, *m)
-    switch {
-    case err == membership.ErrIDExists:
-        return nil, rpctypes.ErrGRPCMemberExist
-    case err == membership.ErrPeerURLexists:
-        return nil, rpctypes.ErrGRPCPeerURLExist
-    case err != nil:
-        return nil, grpc.Errorf(codes.Internal, err.Error())
+    if err = cs.server.AddMember(ctx, *m); err != nil {
+        return nil, togRPCError(err)
     }
 
     return &pb.MemberAddResponse{

@@ -67,16 +59,9 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
 }
 
 func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
-    err := cs.server.RemoveMember(ctx, r.ID)
-    switch {
-    case err == membership.ErrIDRemoved:
-        fallthrough
-    case err == membership.ErrIDNotFound:
-        return nil, rpctypes.ErrGRPCMemberNotFound
-    case err != nil:
-        return nil, grpc.Errorf(codes.Internal, err.Error())
+    if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
+        return nil, togRPCError(err)
     }
 
     return &pb.MemberRemoveResponse{Header: cs.header()}, nil
 }

@@ -85,16 +70,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
         ID:             types.ID(r.ID),
         RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
     }
-    err := cs.server.UpdateMember(ctx, m)
-    switch {
-    case err == membership.ErrPeerURLexists:
-        return nil, rpctypes.ErrGRPCPeerURLExist
-    case err == membership.ErrIDNotFound:
-        return nil, rpctypes.ErrGRPCMemberNotFound
-    case err != nil:
-        return nil, grpc.Errorf(codes.Internal, err.Error())
+    if err := cs.server.UpdateMember(ctx, m); err != nil {
+        return nil, togRPCError(err)
     }
 
     return &pb.MemberUpdateResponse{Header: cs.header()}, nil
 }

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go: 29 changes (generated, vendored)

@@ -17,31 +17,6 @@ package v3rpc
 import "github.com/prometheus/client_golang/prometheus"
 
 var (
-    receivedCounter = prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Namespace: "etcd",
-            Subsystem: "grpc",
-            Name:      "requests_total",
-            Help:      "Counter of received requests.",
-        }, []string{"grpc_service", "grpc_method"})
-
-    failedCounter = prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Namespace: "etcd",
-            Subsystem: "grpc",
-            Name:      "requests_failed_total",
-            Help:      "Counter of failed requests.",
-        }, []string{"grpc_service", "grpc_method", "grpc_code"})
-
-    handlingDuration = prometheus.NewHistogramVec(
-        prometheus.HistogramOpts{
-            Namespace: "etcd",
-            Subsystem: "grpc",
-            Name:      "unary_requests_duration_seconds",
-            Help:      "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
-            Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
-        }, []string{"grpc_service", "grpc_method"})
-
     sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
         Namespace: "etcd",
         Subsystem: "network",

@@ -58,10 +33,6 @@ var (
 )
 
 func init() {
-    prometheus.MustRegister(receivedCounter)
-    prometheus.MustRegister(failedCounter)
-    prometheus.MustRegister(handlingDuration)
-
     prometheus.MustRegister(sentBytes)
     prometheus.MustRegister(receivedBytes)
 }

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go: 45 changes (generated, vendored)

@@ -31,23 +31,28 @@ var (
     ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
     ErrGRPCLeaseExist    = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
 
-    ErrGRPCMemberExist    = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
-    ErrGRPCPeerURLExist   = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
-    ErrGRPCMemberBadURLs  = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
-    ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
+    ErrGRPCMemberExist            = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
+    ErrGRPCPeerURLExist           = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
+    ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
+    ErrGRPCMemberBadURLs          = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
+    ErrGRPCMemberNotFound         = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
 
-    ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
+    ErrGRPCRequestTooLarge        = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
+    ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
 
     ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
     ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
     ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
+    ErrGRPCUserEmpty        = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
     ErrGRPCUserNotFound     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
     ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
     ErrGRPCRoleNotFound     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
     ErrGRPCAuthFailed       = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
-    ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
+    ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
     ErrGRPCRoleNotGranted       = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
     ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+    ErrGRPCAuthNotEnabled       = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+    ErrGRPCInvalidAuthToken     = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
 
     ErrGRPCNoLeader   = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
     ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")

@@ -68,16 +73,19 @@ var (
     grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
     grpc.ErrorDesc(ErrGRPCLeaseExist):    ErrGRPCLeaseExist,
 
-    grpc.ErrorDesc(ErrGRPCMemberExist):    ErrGRPCMemberExist,
-    grpc.ErrorDesc(ErrGRPCPeerURLExist):   ErrGRPCPeerURLExist,
-    grpc.ErrorDesc(ErrGRPCMemberBadURLs):  ErrGRPCMemberBadURLs,
-    grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
+    grpc.ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+    grpc.ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+    grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+    grpc.ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+    grpc.ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
 
-    grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
+    grpc.ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+    grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
 
     grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
     grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
     grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
+    grpc.ErrorDesc(ErrGRPCUserEmpty):        ErrGRPCUserEmpty,
     grpc.ErrorDesc(ErrGRPCUserNotFound):     ErrGRPCUserNotFound,
     grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
     grpc.ErrorDesc(ErrGRPCRoleNotFound):     ErrGRPCRoleNotFound,

@@ -85,6 +93,8 @@ var (
     grpc.ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
     grpc.ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
     grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+    grpc.ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+    grpc.ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
 
     grpc.ErrorDesc(ErrGRPCNoLeader):   ErrGRPCNoLeader,
     grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,

@@ -106,16 +116,19 @@ var (
     ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
     ErrLeaseExist    = Error(ErrGRPCLeaseExist)
 
-    ErrMemberExist    = Error(ErrGRPCMemberExist)
-    ErrPeerURLExist   = Error(ErrGRPCPeerURLExist)
-    ErrMemberBadURLs  = Error(ErrGRPCMemberBadURLs)
-    ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
+    ErrMemberExist            = Error(ErrGRPCMemberExist)
+    ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
+    ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+    ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
+    ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
 
     ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+    ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
 
     ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
     ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
     ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
+    ErrUserEmpty        = Error(ErrGRPCUserEmpty)
     ErrUserNotFound     = Error(ErrGRPCUserNotFound)
     ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
     ErrRoleNotFound     = Error(ErrGRPCRoleNotFound)

@@ -123,6 +136,8 @@ var (
     ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
     ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
     ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+    ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
+    ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
 
     ErrNoLeader   = Error(ErrGRPCNoLeader)
     ErrNotCapable = Error(ErrGRPCNotCapable)
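
rpctypes keeps a map from grpc.ErrorDesc strings back to the canonical error values, so callers can compare errors by identity even after they have crossed the wire. A hedged, simplified sketch of that lookup pattern (not the full rpctypes API):

package main

import (
    "fmt"

    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
)

var errLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")

// stringToErr maps the wire-level description back to a canonical error
// value, so callers can use == instead of string comparisons everywhere.
var stringToErr = map[string]error{
    grpc.ErrorDesc(errLeaseNotFound): errLeaseNotFound,
}

func canonical(err error) error {
    if c, ok := stringToErr[grpc.ErrorDesc(err)]; ok {
        return c
    }
    return err
}

func main() {
    // An error that crossed the wire loses identity but keeps its text.
    wire := grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
    fmt.Println(canonical(wire) == errLeaseNotFound) // true
}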

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go: 30 changes (generated, vendored)

@@ -18,6 +18,7 @@ import (
     "github.com/coreos/etcd/auth"
     "github.com/coreos/etcd/etcdserver"
     "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+    "github.com/coreos/etcd/etcdserver/membership"
    "github.com/coreos/etcd/lease"
     "github.com/coreos/etcd/mvcc"
     "google.golang.org/grpc"

@@ -26,17 +27,29 @@ import (
 
 func togRPCError(err error) error {
     switch err {
+    case membership.ErrIDRemoved:
+        return rpctypes.ErrGRPCMemberNotFound
+    case membership.ErrIDNotFound:
+        return rpctypes.ErrGRPCMemberNotFound
+    case membership.ErrIDExists:
+        return rpctypes.ErrGRPCMemberExist
+    case membership.ErrPeerURLexists:
+        return rpctypes.ErrGRPCPeerURLExist
+    case etcdserver.ErrNotEnoughStartedMembers:
+        return rpctypes.ErrMemberNotEnoughStarted
+
     case mvcc.ErrCompacted:
         return rpctypes.ErrGRPCCompacted
     case mvcc.ErrFutureRev:
         return rpctypes.ErrGRPCFutureRev
-    case lease.ErrLeaseNotFound:
-        return rpctypes.ErrGRPCLeaseNotFound
     // TODO: handle error from raft and timeout
     case etcdserver.ErrRequestTooLarge:
         return rpctypes.ErrGRPCRequestTooLarge
     case etcdserver.ErrNoSpace:
         return rpctypes.ErrGRPCNoSpace
+    case etcdserver.ErrTooManyRequests:
+        return rpctypes.ErrTooManyRequests
+
     case etcdserver.ErrNoLeader:
         return rpctypes.ErrGRPCNoLeader

@@ -48,6 +61,13 @@ func togRPCError(err error) error {
         return rpctypes.ErrGRPCTimeoutDueToLeaderFail
     case etcdserver.ErrTimeoutDueToConnectionLost:
         return rpctypes.ErrGRPCTimeoutDueToConnectionLost
+    case etcdserver.ErrUnhealthy:
+        return rpctypes.ErrGRPCUnhealthy
+
+    case lease.ErrLeaseNotFound:
+        return rpctypes.ErrGRPCLeaseNotFound
+    case lease.ErrLeaseExists:
+        return rpctypes.ErrGRPCLeaseExist
 
     case auth.ErrRootUserNotExist:
         return rpctypes.ErrGRPCRootUserNotExist

@@ -55,6 +75,8 @@ func togRPCError(err error) error {
         return rpctypes.ErrGRPCRootRoleNotExist
     case auth.ErrUserAlreadyExist:
         return rpctypes.ErrGRPCUserAlreadyExist
+    case auth.ErrUserEmpty:
+        return rpctypes.ErrGRPCUserEmpty
     case auth.ErrUserNotFound:
         return rpctypes.ErrGRPCUserNotFound
     case auth.ErrRoleAlreadyExist:

@@ -69,7 +91,11 @@ func togRPCError(err error) error {
         return rpctypes.ErrGRPCRoleNotGranted
     case auth.ErrPermissionNotGranted:
         return rpctypes.ErrGRPCPermissionNotGranted
+    case auth.ErrAuthNotEnabled:
+        return rpctypes.ErrGRPCAuthNotEnabled
+    case auth.ErrInvalidAuthToken:
+        return rpctypes.ErrGRPCInvalidAuthToken
     default:
-        return grpc.Errorf(codes.Internal, err.Error())
+        return grpc.Errorf(codes.Unknown, err.Error())
     }
 }
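
togRPCError centralizes the mapping from internal sentinel errors to gRPC status errors at the API boundary, which is what let the per-handler switch statements in member.go and lease.go collapse to a single call. A hedged miniature of the same idea:

package main

import (
    "errors"
    "fmt"

    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
)

// Internal sentinel errors stay transport-agnostic...
var errNoLeader = errors.New("etcdserver: no leader")

// ...and one boundary function decides how each maps onto the wire.
func toGRPCError(err error) error {
    switch err {
    case errNoLeader:
        return grpc.Errorf(codes.Unavailable, err.Error())
    default:
        // Unknown rather than Internal: the server cannot classify it.
        return grpc.Errorf(codes.Unknown, err.Error())
    }
}

func main() {
    fmt.Println(grpc.Code(toGRPCError(errNoLeader))) // Unavailable
}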

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go: 36 changes (generated, vendored)

@@ -92,6 +92,7 @@ type serverWatchStream struct {
     mu sync.Mutex
     // progress tracks the watchID that stream might need to send
     // progress to.
+    // TODO: combine progress and prevKV into a single struct?
     progress map[mvcc.WatchID]bool
+    prevKV   map[mvcc.WatchID]bool
 

@@ -130,10 +131,14 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
     // but when stream.Context().Done() is closed, the stream's recv
     // may continue to block since it uses a different context, leading to
     // deadlock when calling sws.close().
-    go func() { errc <- sws.recvLoop() }()
+    go func() {
+        if rerr := sws.recvLoop(); rerr != nil {
+            errc <- rerr
+        }
+    }()
     select {
     case err = <-errc:
+        close(sws.ctrlStream)
     case <-stream.Context().Done():
         err = stream.Context().Err()
         // the only server-side cancellation is noleader for now.

@@ -146,7 +151,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 }
 
 func (sws *serverWatchStream) recvLoop() error {
-    defer close(sws.ctrlStream)
     for {
         req, err := sws.gRPCStream.Recv()
         if err == io.EOF {

@@ -171,12 +175,14 @@ func (sws *serverWatchStream) recvLoop() error {
             // support >= key queries
             creq.RangeEnd = []byte{}
         }
+        filters := FiltersFromRequest(creq)
+
         wsrev := sws.watchStream.Rev()
         rev := creq.StartRevision
         if rev == 0 {
             rev = wsrev + 1
         }
-        id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
+        id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
         if id != -1 {
             sws.mu.Lock()
             if creq.ProgressNotify {

@@ -353,3 +359,25 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
         RaftTerm:  sws.raftTimer.Term(),
     }
 }
+
+func filterNoDelete(e mvccpb.Event) bool {
+    return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+    return e.Type == mvccpb.PUT
+}
+
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+    filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+    for _, ft := range creq.Filters {
+        switch ft {
+        case pb.WatchCreateRequest_NOPUT:
+            filters = append(filters, filterNoPut)
+        case pb.WatchCreateRequest_NODELETE:
+            filters = append(filters, filterNoDelete)
+        default:
+        }
+    }
+    return filters
+}
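
Watch filters are plain predicates collected into a slice and passed variadically to the watch stream; an event is suppressed when any filter matches it. A hedged, generic sketch of the same composition (the Event type here is illustrative):

package main

import "fmt"

type Event struct{ Type string }

// FilterFunc returns true when the event should be dropped.
type FilterFunc func(Event) bool

func filterNoPut(e Event) bool { return e.Type == "PUT" }

// apply keeps only events that no filter rejects.
func apply(evs []Event, filters ...FilterFunc) []Event {
    out := evs[:0]
    for _, e := range evs {
        dropped := false
        for _, f := range filters {
            if f(e) {
                dropped = true
                break
            }
        }
        if !dropped {
            out = append(out, e)
        }
    }
    return out
}

func main() {
    evs := []Event{{"PUT"}, {"DELETE"}, {"PUT"}}
    fmt.Println(apply(evs, filterNoPut)) // [{DELETE}]
}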

vendor/github.com/coreos/etcd/etcdserver/apply.go: 100 changes (generated, vendored)

@@ -35,7 +35,7 @@ const (
     // to apply functions instead of a valid txn ID.
     noTxn = -1
 
-    warnApplyDuration = 10 * time.Millisecond
+    warnApplyDuration = 100 * time.Millisecond
 )
 
 type applyResult struct {

@@ -258,7 +258,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
     }
 
     limit := r.Limit
-    if r.SortOrder != pb.RangeRequest_NONE {
+    if r.SortOrder != pb.RangeRequest_NONE ||
+        r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+        r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
         // fetch everything; sort and truncate afterwards
         limit = 0
     }

@@ -285,7 +287,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
         }
     }
 
-    if r.SortOrder != pb.RangeRequest_NONE {
+    if r.MaxModRevision != 0 {
+        f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+        pruneKVs(rr, f)
+    }
+    if r.MinModRevision != 0 {
+        f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+        pruneKVs(rr, f)
+    }
+    if r.MaxCreateRevision != 0 {
+        f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+        pruneKVs(rr, f)
+    }
+    if r.MinCreateRevision != 0 {
+        f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+        pruneKVs(rr, f)
+    }
+
+    sortOrder := r.SortOrder
+    if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+        // Since current mvcc.Range implementation returns results
+        // sorted by keys in lexiographically ascending order,
+        // sort ASCEND by default only when target is not 'KEY'
+        sortOrder = pb.RangeRequest_ASCEND
+    }
+    if sortOrder != pb.RangeRequest_NONE {
         var sorter sort.Interface
         switch {
         case r.SortTarget == pb.RangeRequest_KEY:

@@ -300,9 +326,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
             sorter = &kvSortByValue{&kvSort{rr.KVs}}
         }
         switch {
-        case r.SortOrder == pb.RangeRequest_ASCEND:
+        case sortOrder == pb.RangeRequest_ASCEND:
             sort.Sort(sorter)
-        case r.SortOrder == pb.RangeRequest_DESCEND:
+        case sortOrder == pb.RangeRequest_DESCEND:
             sort.Sort(sort.Reverse(sorter))
         }
     }

@@ -345,34 +371,23 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
         return nil, err
     }
 
-    revision := a.s.KV().Rev()
-
     // When executing the operations of txn, we need to hold the txn lock.
     // So the reader will not see any intermediate results.
     txnID := a.s.KV().TxnBegin()
-    defer func() {
-        err := a.s.KV().TxnEnd(txnID)
-        if err != nil {
-            panic(fmt.Sprint("unexpected error when closing txn", txnID))
-        }
-    }()
 
     resps := make([]*pb.ResponseOp, len(reqs))
-    changedKV := false
     for i := range reqs {
-        if reqs[i].GetRequestRange() == nil {
-            changedKV = true
-        }
         resps[i] = a.applyUnion(txnID, reqs[i])
     }
 
-    if changedKV {
-        revision += 1
+    err := a.s.KV().TxnEnd(txnID)
+    if err != nil {
+        panic(fmt.Sprint("unexpected error when closing txn", txnID))
     }
 
     txnResp := &pb.TxnResponse{}
     txnResp.Header = &pb.ResponseHeader{}
-    txnResp.Header.Revision = revision
+    txnResp.Header.Revision = a.s.KV().Rev()
     txnResp.Responses = resps
     txnResp.Succeeded = ok
     return txnResp, nil

@@ -436,6 +451,10 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
         if result != 0 {
             return rev, false
         }
+    case pb.Compare_NOT_EQUAL:
+        if result == 0 {
+            return rev, false
+        }
     case pb.Compare_GREATER:
         if result != 1 {
             return rev, false

@@ -454,7 +473,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
     if tv.RequestRange != nil {
         resp, err := a.Range(txnID, tv.RequestRange)
         if err != nil {
-            panic("unexpected error during txn")
+            plog.Panicf("unexpected error during txn: %v", err)
         }
         return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
     }

@@ -462,7 +481,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
     if tv.RequestPut != nil {
         resp, err := a.Put(txnID, tv.RequestPut)
         if err != nil {
-            panic("unexpected error during txn")
+            plog.Panicf("unexpected error during txn: %v", err)
         }
         return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
     }

@@ -470,7 +489,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
     if tv.RequestDeleteRange != nil {
         resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
         if err != nil {
-            panic("unexpected error during txn")
+            plog.Panicf("unexpected error during txn: %v", err)
         }
         return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
     }

@@ -500,7 +519,7 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
     resp := &pb.LeaseGrantResponse{}
     if err == nil {
         resp.ID = int64(l.ID)
-        resp.TTL = l.TTL
+        resp.TTL = l.TTL()
         resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
     }
 

@@ -784,3 +803,36 @@ func compareInt64(a, b int64) int {
 func isGteRange(rangeEnd []byte) bool {
     return len(rangeEnd) == 1 && rangeEnd[0] == 0
 }
+
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+    return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+    f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+        j := 0
+        for i := 0; i < len(ops); i++ {
+            if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+                continue
+            }
+            ops[j] = ops[i]
+            j++
+        }
+
+        return ops[:j]
+    }
+
+    txn.Success = f(txn.Success)
+    txn.Failure = f(txn.Failure)
+}
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+    j := 0
+    for i := range rr.KVs {
+        rr.KVs[j] = rr.KVs[i]
+        if !isPrunable(&rr.KVs[i]) {
+            j++
+        }
+    }
+    rr.KVs = rr.KVs[:j]
+}
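
pruneKVs and removeNeedlessRangeReqs both use the classic in-place slice filter: a write index chases the read index so the backing array is reused and no second allocation is made. A small standalone sketch of the idiom:

package main

import "fmt"

// filterInPlace keeps elements satisfying keep, reusing the backing array.
func filterInPlace(xs []int, keep func(int) bool) []int {
    j := 0
    for i := range xs {
        if keep(xs[i]) {
            xs[j] = xs[i]
            j++
        }
    }
    return xs[:j]
}

func main() {
    xs := []int{3, 1, 4, 1, 5, 9, 2, 6}
    fmt.Println(filterInPlace(xs, func(x int) bool { return x >= 3 }))
    // [3 4 5 9 6]
}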

vendor/github.com/coreos/etcd/etcdserver/apply_auth.go: 100 changes (generated, vendored)

@@ -27,8 +27,9 @@ type authApplierV3 struct {
 
     // mu serializes Apply so that user isn't corrupted and so that
     // serialized requests don't leak data from TOCTOU errors
-    mu   sync.Mutex
-    user string
+    mu sync.Mutex
+
+    authInfo auth.AuthInfo
 }
 
 func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {

@@ -41,45 +42,57 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
     if r.Header != nil {
         // backward-compatible with pre-3.0 releases when internalRaftRequest
         // does not have header field
-        aa.user = r.Header.Username
+        aa.authInfo.Username = r.Header.Username
+        aa.authInfo.Revision = r.Header.AuthRevision
     }
-    if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
-        aa.user = ""
-        return &applyResult{err: auth.ErrPermissionDenied}
+    if needAdminPermission(r) {
+        if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+            aa.authInfo.Username = ""
+            aa.authInfo.Revision = 0
+            return &applyResult{err: err}
+        }
     }
     ret := aa.applierV3.Apply(r)
-    aa.user = ""
+    aa.authInfo.Username = ""
+    aa.authInfo.Revision = 0
     return ret
 }
 
 func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
-    if !aa.as.IsPutPermitted(aa.user, r.Key) {
-        return nil, auth.ErrPermissionDenied
+    if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+        return nil, err
     }
-    if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, nil) {
-        return nil, auth.ErrPermissionDenied
+    if r.PrevKv {
+        err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+        if err != nil {
+            return nil, err
+        }
     }
     return aa.applierV3.Put(txnID, r)
 }
 
 func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
-    if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-        return nil, auth.ErrPermissionDenied
+    if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+        return nil, err
     }
     return aa.applierV3.Range(txnID, r)
 }
 
 func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
-    if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
-        return nil, auth.ErrPermissionDenied
+    if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+        return nil, err
     }
-    if r.PrevKv && !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
-        return nil, auth.ErrPermissionDenied
+    if r.PrevKv {
+        err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+        if err != nil {
+            return nil, err
+        }
     }
+
     return aa.applierV3.DeleteRange(txnID, r)
 }
 
-func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
     for _, requ := range reqs {
         switch tv := requ.Request.(type) {
         case *pb.RequestOp_RequestRange:

@@ -87,8 +100,8 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req
                 continue
             }
 
-            if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
-                return false
+            if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+                return err
             }
 
         case *pb.RequestOp_RequestPut:

@@ -96,8 +109,8 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req
                 continue
             }
 
-            if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
-                return false
+            if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+                return err
             }
 
         case *pb.RequestOp_RequestDeleteRange:

@@ -105,29 +118,42 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req
                 continue
             }
 
-            if tv.RequestDeleteRange.PrevKv && !aa.as.IsRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
-                return false
+            if tv.RequestDeleteRange.PrevKv {
+                err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+                if err != nil {
+                    return err
+                }
+            }
+
+            err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+            if err != nil {
+                return err
             }
         }
     }
 
-    return true
+    return nil
 }
 
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+    for _, c := range rt.Compare {
+        if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
+            return err
+        }
+    }
+    if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+        return err
+    }
+    if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
+        return err
+    }
+    return nil
+}
+
 func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
-    for _, c := range rt.Compare {
-        if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
-            return nil, auth.ErrPermissionDenied
-        }
+    if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+        return nil, err
     }
 
-    if !aa.checkTxnReqsPermission(rt.Success) {
-        return nil, auth.ErrPermissionDenied
-    }
-    if !aa.checkTxnReqsPermission(rt.Failure) {
-        return nil, auth.ErrPermissionDenied
-    }
-
     return aa.applierV3.Txn(rt)
 }
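
checkTxnAuth validates every compare and every operation on both txn branches before anything is applied, so a denied op can never leave a partially applied transaction behind. A hedged sketch of that all-or-nothing precheck (the permission model is invented for illustration):

package main

import (
    "errors"
    "fmt"
)

type op struct{ key string }

var errDenied = errors.New("permission denied")

func permitted(user, key string) error {
    if user != "root" && key == "secret" {
        return errDenied
    }
    return nil
}

// applyTxn checks both branches up front; only then does it mutate state.
func applyTxn(user string, success, failure []op) error {
    all := append(append([]op{}, success...), failure...)
    for _, o := range all {
        if err := permitted(user, o.key); err != nil {
            return err // nothing has been applied yet
        }
    }
    // ... evaluate the condition and apply the chosen branch here ...
    return nil
}

func main() {
    fmt.Println(applyTxn("alice", []op{{"a"}}, []op{{"secret"}})) // permission denied
}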
96
vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
generated
vendored
96
vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
generated
vendored
@@ -46,7 +46,7 @@ const (
)

var (
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "auth")
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
)

var rootRole = Role{
@@ -167,7 +167,7 @@ func (_ passwordStore) HashPassword(password string) (string, error) {
}

func (s *store) AllUsers() ([]string, error) {
    resp, err := s.requestResource("/users/", false)
    resp, err := s.requestResource("/users/", false, false)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -185,33 +185,13 @@ func (s *store) AllUsers() ([]string, error) {
    return nodes, nil
}

func (s *store) GetUser(name string) (User, error) {
    resp, err := s.requestResource("/users/"+name, false)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
                return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
            }
        }
        return User{}, err
    }
    var u User
    err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
    if err != nil {
        return u, err
    }
    // Attach root role to root user.
    if u.User == "root" {
        u = attachRootRole(u)
    }
    return u, nil
}
func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }

// CreateOrUpdateUser should be only used for creating the new user or when you are not
// sure if it is a create or update. (When only password is passed in, we are not sure
// if it is a update or create)
func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
    _, err = s.GetUser(user.User)
    _, err = s.getUser(user.User, true)
    if err == nil {
        out, err = s.UpdateUser(user)
        return out, false, err
@@ -271,7 +251,7 @@ func (s *store) DeleteUser(name string) error {
}

func (s *store) UpdateUser(user User) (User, error) {
    old, err := s.GetUser(user.User)
    old, err := s.getUser(user.User, true)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -297,7 +277,7 @@ func (s *store) UpdateUser(user User) (User, error) {

func (s *store) AllRoles() ([]string, error) {
    nodes := []string{RootRoleName}
    resp, err := s.requestResource("/roles/", false)
    resp, err := s.requestResource("/roles/", false, false)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -314,23 +294,7 @@ func (s *store) AllRoles() ([]string, error) {
    return nodes, nil
}

func (s *store) GetRole(name string) (Role, error) {
    if name == RootRoleName {
        return rootRole, nil
    }
    resp, err := s.requestResource("/roles/"+name, false)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
                return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
            }
        }
        return Role{}, err
    }
    var r Role
    err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
    return r, err
}
func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }

func (s *store) CreateRole(role Role) error {
    if role.Role == RootRoleName {
@@ -372,7 +336,7 @@ func (s *store) UpdateRole(role Role) (Role, error) {
    if role.Role == RootRoleName {
        return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
    }
    old, err := s.GetRole(role.Role)
    old, err := s.getRole(role.Role, true)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -404,10 +368,10 @@ func (s *store) EnableAuth() error {
        return authErr(http.StatusConflict, "already enabled")
    }

    if _, err := s.GetUser("root"); err != nil {
    if _, err := s.getUser("root", true); err != nil {
        return authErr(http.StatusConflict, "No root user available, please create one")
    }
    if _, err := s.GetRole(GuestRoleName); err != nil {
    if _, err := s.getRole(GuestRoleName, true); err != nil {
        plog.Printf("no guest role access found, creating default")
        if err := s.CreateRole(guestRole); err != nil {
            plog.Errorf("error creating guest role. aborting auth enable.")
@@ -641,3 +605,43 @@ func attachRootRole(u User) User {
    }
    return u
}

func (s *store) getUser(name string, quorum bool) (User, error) {
    resp, err := s.requestResource("/users/"+name, false, quorum)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
                return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
            }
        }
        return User{}, err
    }
    var u User
    err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
    if err != nil {
        return u, err
    }
    // Attach root role to root user.
    if u.User == "root" {
        u = attachRootRole(u)
    }
    return u, nil
}

func (s *store) getRole(name string, quorum bool) (Role, error) {
    if name == RootRoleName {
        return rootRole, nil
    }
    resp, err := s.requestResource("/roles/"+name, false, quorum)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
                return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
            }
        }
        return Role{}, err
    }
    var r Role
    err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
    return r, err
}
10
vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
generated
vendored
@@ -85,7 +85,7 @@ func (s *store) detectAuth() bool {
    if s.server == nil {
        return false
    }
    value, err := s.requestResource("/enabled", false)
    value, err := s.requestResource("/enabled", false, false)
    if err != nil {
        if e, ok := err.(*etcderr.Error); ok {
            if e.ErrorCode == etcderr.EcodeKeyNotFound {
@@ -105,12 +105,16 @@ func (s *store) detectAuth() bool {
    return u
}

func (s *store) requestResource(res string, dir bool) (etcdserver.Response, error) {
func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
    ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
    defer cancel()
    p := path.Join(StorePermsPrefix, res)
    method := "GET"
    if quorum {
        method = "QGET"
    }
    rr := etcdserverpb.Request{
        Method: "GET",
        Method: method,
        Path:   p,
        Dir:    dir,
    }
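The quorum flag added to requestResource simply switches the v2 method from a local read ("GET") to a quorum read ("QGET") that goes through raft and therefore sees the latest committed value, at the cost of a round of consensus. A sketch of that selection logic in isolation:

package main

import "fmt"

// method mirrors the selection added to requestResource: quorum reads
// are linearizable, plain GETs may serve stale local state.
func method(quorum bool) string {
    if quorum {
        return "QGET"
    }
    return "GET"
}

func main() {
    // Auth-critical lookups (root user, guest role, user/role updates)
    // now pass quorum=true so a stale read cannot bypass a recent change.
    fmt.Println(method(true), method(false)) // QGET GET
}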
11
vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
generated
vendored
@@ -94,7 +94,16 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
            }
            continue
        }
        return membership.NewClusterFromMembers("", id, membs), nil

        // check the length of membership members
        // if the membership members are present then prepare and return raft cluster
        // if membership members are not present then the raft cluster formed will be
        // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
        if len(membs) > 0 {
            return membership.NewClusterFromMembers("", id, membs), nil
        }

        return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
    }
    return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
}
59
vendor/github.com/coreos/etcd/etcdserver/config.go
generated
vendored
@@ -16,11 +16,13 @@ package etcdserver

import (
    "fmt"
    "path"
    "path/filepath"
    "sort"
    "strings"
    "time"

    "golang.org/x/net/context"

    "github.com/coreos/etcd/pkg/netutil"
    "github.com/coreos/etcd/pkg/transport"
    "github.com/coreos/etcd/pkg/types"
@@ -55,8 +57,6 @@ type ServerConfig struct {

    StrictReconfigCheck bool

    EnablePprof bool

    // ClientCertAuthEnabled is true when cert has been signed by the client CA.
    ClientCertAuthEnabled bool
}
@@ -64,7 +64,10 @@ type ServerConfig struct {
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
    if err := c.verifyLocalMember(true); err != nil {
    if err := c.hasLocalMember(); err != nil {
        return err
    }
    if err := c.advertiseMatchesCluster(); err != nil {
        return err
    }
    if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -79,10 +82,9 @@ func (c *ServerConfig) VerifyBootstrap() error {
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
    // no need for strict checking since the member have announced its
    // peer urls to the cluster before starting and do not have to set
    // it in the configuration again.
    if err := c.verifyLocalMember(false); err != nil {
    // The member has announced its peer urls to the cluster before starting; no need to
    // set the configuration again.
    if err := c.hasLocalMember(); err != nil {
        return err
    }
    if checkDuplicateURL(c.InitialPeerURLsMap) {
@@ -94,39 +96,38 @@ func (c *ServerConfig) VerifyJoinExisting() error {
    return nil
}

// verifyLocalMember verifies the configured member is in configured
// cluster. If strict is set, it also verifies the configured member
// has the same peer urls as configured advertised peer urls.
func (c *ServerConfig) verifyLocalMember(strict bool) error {
    urls := c.InitialPeerURLsMap[c.Name]
    // Make sure the cluster at least contains the local server.
    if urls == nil {
// hasLocalMember checks that the cluster at least contains the local server.
func (c *ServerConfig) hasLocalMember() error {
    if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
        return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
    }

    // Advertised peer URLs must match those in the cluster peer list
    apurls := c.PeerURLs.StringSlice()
    sort.Strings(apurls)
    urls.Sort()
    if strict {
        if !netutil.URLStringsEqual(apurls, urls.StringSlice()) {
            umap := map[string]types.URLs{c.Name: c.PeerURLs}
            return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
        }
    }
    return nil
}

func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
func (c *ServerConfig) advertiseMatchesCluster() error {
    urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
    urls.Sort()
    sort.Strings(apurls)
    ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
    defer cancel()
    if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
        umap := map[string]types.URLs{c.Name: c.PeerURLs}
        return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
    }
    return nil
}

func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
    if c.DedicatedWALDir != "" {
        return c.DedicatedWALDir
    }
    return path.Join(c.MemberDir(), "wal")
    return filepath.Join(c.MemberDir(), "wal")
}

func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
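The switch from path.Join to filepath.Join in MemberDir, WALDir, and SnapDir matters on Windows: package path always uses forward slashes and is meant for slash-separated paths like URLs and store keys, while path/filepath uses the host OS separator. A small standalone illustration using only the standard library:

package main

import (
    "fmt"
    "path"
    "path/filepath"
)

func main() {
    // package path: slash-separated everywhere (URLs, etcd store keys).
    fmt.Println(path.Join("data", "member", "wal")) // data/member/wal on every OS

    // path/filepath: OS-specific separator. On Windows this prints
    // data\member\wal; on Unix it prints data/member/wal.
    fmt.Println(filepath.Join("data", "member", "wal"))
}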
4
vendor/github.com/coreos/etcd/etcdserver/errors.go
generated
vendored
@@ -26,11 +26,13 @@ var (
    ErrTimeout                    = errors.New("etcdserver: request timed out")
    ErrTimeoutDueToLeaderFail     = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
    ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
    ErrTimeoutLeaderTransfer      = errors.New("etcdserver: request timed out, leader transfer took too long")
    ErrNotEnoughStartedMembers    = errors.New("etcdserver: re-configuration failed due to not enough started members")
    ErrNoLeader                   = errors.New("etcdserver: no leader")
    ErrRequestTooLarge            = errors.New("etcdserver: request is too large")
    ErrNoSpace                    = errors.New("etcdserver: no space")
    ErrInvalidAuthToken           = errors.New("etcdserver: invalid auth token")
    ErrTooManyRequests            = errors.New("etcdserver: too many requests")
    ErrUnhealthy                  = errors.New("etcdserver: unhealthy cluster")
)

type DiscoveryError struct {
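The new ErrTimeoutLeaderTransfer, ErrTooManyRequests, and ErrUnhealthy follow the usual Go sentinel-error pattern: package-level errors.New values that callers compare by identity. A minimal sketch of how such sentinels are typically consumed — the switch arms here are illustrative, not etcd's actual handling:

package main

import (
    "errors"
    "fmt"
)

var (
    errTooManyRequests = errors.New("etcdserver: too many requests")
    errUnhealthy       = errors.New("etcdserver: unhealthy cluster")
)

func handle(err error) string {
    // Sentinel errors are compared by identity, so the message text
    // never needs to be parsed.
    switch err {
    case errTooManyRequests:
        return "back off and retry"
    case errUnhealthy:
        return "refuse reconfiguration"
    default:
        return "propagate"
    }
}

func main() {
    fmt.Println(handle(errTooManyRequests)) // back off and retry
    fmt.Println(handle(errors.New("other"))) // propagate
}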
340
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
@@ -45,6 +45,8 @@
    LeaseRevokeResponse
    LeaseKeepAliveRequest
    LeaseKeepAliveResponse
    LeaseTimeToLiveRequest
    LeaseTimeToLiveResponse
    Member
    MemberAddRequest
    MemberAddResponse
@@ -113,26 +115,28 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type Request struct {
    ID               uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
    Method           string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
    Path             string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
    Val              string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
    Dir              bool   `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
    PrevValue        string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
    PrevIndex        uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
    PrevExist        *bool  `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
    Expiration       int64  `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
    Wait             bool   `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
    Since            uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
    Recursive        bool   `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
    Sorted           bool   `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
    Quorum           bool   `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
    Time             int64  `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
    Stream           bool   `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
    Refresh          *bool  `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
    ID               uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
    Method           string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
    Path             string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
    Val              string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
    Dir              bool   `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
    PrevValue        string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
    PrevIndex        uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
    PrevExist        *bool  `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
    Expiration       int64  `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
    Wait             bool   `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
    Since            uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
    Recursive        bool   `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
    Sorted           bool   `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
    Quorum           bool   `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
    Time             int64  `protobuf:"varint,15,opt,name=Time" json:"Time"`
    Stream           bool   `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
    Refresh          *bool  `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
    XXX_unrecognized []byte `json:"-"`
}

@@ -142,8 +146,8 @@ func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }

type Metadata struct {
    NodeID           uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
    ClusterID        uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
    NodeID           uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
    ClusterID        uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
    XXX_unrecognized []byte `json:"-"`
}

@@ -156,182 +160,182 @@ func init() {
    proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
    proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
}
func (m *Request) Marshal() (data []byte, err error) {
func (m *Request) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    data = make([]byte, size)
    n, err := m.MarshalTo(data)
    dAtA = make([]byte, size)
    n, err := m.MarshalTo(dAtA)
    if err != nil {
        return nil, err
    }
    return data[:n], nil
    return dAtA[:n], nil
}

func (m *Request) MarshalTo(data []byte) (int, error) {
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
    var i int
    _ = i
    var l int
    _ = l
    data[i] = 0x8
    dAtA[i] = 0x8
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.ID))
    data[i] = 0x12
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
    dAtA[i] = 0x12
    i++
    i = encodeVarintEtcdserver(data, i, uint64(len(m.Method)))
    i += copy(data[i:], m.Method)
    data[i] = 0x1a
    i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
    i += copy(dAtA[i:], m.Method)
    dAtA[i] = 0x1a
    i++
    i = encodeVarintEtcdserver(data, i, uint64(len(m.Path)))
    i += copy(data[i:], m.Path)
    data[i] = 0x22
    i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
    i += copy(dAtA[i:], m.Path)
    dAtA[i] = 0x22
    i++
    i = encodeVarintEtcdserver(data, i, uint64(len(m.Val)))
    i += copy(data[i:], m.Val)
    data[i] = 0x28
    i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
    i += copy(dAtA[i:], m.Val)
    dAtA[i] = 0x28
    i++
    if m.Dir {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    data[i] = 0x32
    dAtA[i] = 0x32
    i++
    i = encodeVarintEtcdserver(data, i, uint64(len(m.PrevValue)))
    i += copy(data[i:], m.PrevValue)
    data[i] = 0x38
    i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
    i += copy(dAtA[i:], m.PrevValue)
    dAtA[i] = 0x38
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.PrevIndex))
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
    if m.PrevExist != nil {
        data[i] = 0x40
        dAtA[i] = 0x40
        i++
        if *m.PrevExist {
            data[i] = 1
            dAtA[i] = 1
        } else {
            data[i] = 0
            dAtA[i] = 0
        }
        i++
    }
    data[i] = 0x48
    dAtA[i] = 0x48
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.Expiration))
    data[i] = 0x50
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
    dAtA[i] = 0x50
    i++
    if m.Wait {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    data[i] = 0x58
    dAtA[i] = 0x58
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.Since))
    data[i] = 0x60
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
    dAtA[i] = 0x60
    i++
    if m.Recursive {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    data[i] = 0x68
    dAtA[i] = 0x68
    i++
    if m.Sorted {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    data[i] = 0x70
    dAtA[i] = 0x70
    i++
    if m.Quorum {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    data[i] = 0x78
    dAtA[i] = 0x78
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.Time))
    data[i] = 0x80
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
    dAtA[i] = 0x80
    i++
    data[i] = 0x1
    dAtA[i] = 0x1
    i++
    if m.Stream {
        data[i] = 1
        dAtA[i] = 1
    } else {
        data[i] = 0
        dAtA[i] = 0
    }
    i++
    if m.Refresh != nil {
        data[i] = 0x88
        dAtA[i] = 0x88
        i++
        data[i] = 0x1
        dAtA[i] = 0x1
        i++
        if *m.Refresh {
            data[i] = 1
            dAtA[i] = 1
        } else {
            data[i] = 0
            dAtA[i] = 0
        }
        i++
    }
    if m.XXX_unrecognized != nil {
        i += copy(data[i:], m.XXX_unrecognized)
        i += copy(dAtA[i:], m.XXX_unrecognized)
    }
    return i, nil
}

func (m *Metadata) Marshal() (data []byte, err error) {
func (m *Metadata) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    data = make([]byte, size)
    n, err := m.MarshalTo(data)
    dAtA = make([]byte, size)
    n, err := m.MarshalTo(dAtA)
    if err != nil {
        return nil, err
    }
    return data[:n], nil
    return dAtA[:n], nil
}

func (m *Metadata) MarshalTo(data []byte) (int, error) {
func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
    var i int
    _ = i
    var l int
    _ = l
    data[i] = 0x8
    dAtA[i] = 0x8
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.NodeID))
    data[i] = 0x10
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
    dAtA[i] = 0x10
    i++
    i = encodeVarintEtcdserver(data, i, uint64(m.ClusterID))
    i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
    if m.XXX_unrecognized != nil {
        i += copy(data[i:], m.XXX_unrecognized)
        i += copy(dAtA[i:], m.XXX_unrecognized)
    }
    return i, nil
}

func encodeFixed64Etcdserver(data []byte, offset int, v uint64) int {
    data[offset] = uint8(v)
    data[offset+1] = uint8(v >> 8)
    data[offset+2] = uint8(v >> 16)
    data[offset+3] = uint8(v >> 24)
    data[offset+4] = uint8(v >> 32)
    data[offset+5] = uint8(v >> 40)
    data[offset+6] = uint8(v >> 48)
    data[offset+7] = uint8(v >> 56)
func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    dAtA[offset+4] = uint8(v >> 32)
    dAtA[offset+5] = uint8(v >> 40)
    dAtA[offset+6] = uint8(v >> 48)
    dAtA[offset+7] = uint8(v >> 56)
    return offset + 8
}
func encodeFixed32Etcdserver(data []byte, offset int, v uint32) int {
    data[offset] = uint8(v)
    data[offset+1] = uint8(v >> 8)
    data[offset+2] = uint8(v >> 16)
    data[offset+3] = uint8(v >> 24)
func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    return offset + 4
}
func encodeVarintEtcdserver(data []byte, offset int, v uint64) int {
func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        data[offset] = uint8(v&0x7f | 0x80)
        dAtA[offset] = uint8(v&0x7f | 0x80)
        v >>= 7
        offset++
    }
    data[offset] = uint8(v)
    dAtA[offset] = uint8(v)
    return offset + 1
}
func (m *Request) Size() (n int) {
@@ -392,8 +396,8 @@ func sovEtcdserver(x uint64) (n int) {
func sozEtcdserver(x uint64) (n int) {
    return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Request) Unmarshal(data []byte) error {
    l := len(data)
func (m *Request) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
@@ -405,7 +409,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := data[iNdEx]
            b := dAtA[iNdEx]
            iNdEx++
            wire |= (uint64(b) & 0x7F) << shift
            if b < 0x80 {
@@ -433,7 +437,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.ID |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -452,7 +456,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -467,7 +471,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Method = string(data[iNdEx:postIndex])
            m.Method = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 3:
            if wireType != 2 {
@@ -481,7 +485,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -496,7 +500,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Path = string(data[iNdEx:postIndex])
            m.Path = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 4:
            if wireType != 2 {
@@ -510,7 +514,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -525,7 +529,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Val = string(data[iNdEx:postIndex])
            m.Val = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 5:
            if wireType != 0 {
@@ -539,7 +543,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -559,7 +563,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -574,7 +578,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.PrevValue = string(data[iNdEx:postIndex])
            m.PrevValue = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 7:
            if wireType != 0 {
@@ -588,7 +592,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.PrevIndex |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -607,7 +611,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -628,7 +632,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.Expiration |= (int64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -647,7 +651,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -667,7 +671,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.Since |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -686,7 +690,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -706,7 +710,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -726,7 +730,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -746,7 +750,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.Time |= (int64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -765,7 +769,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -785,7 +789,7 @@ func (m *Request) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                v |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -796,7 +800,7 @@ func (m *Request) Unmarshal(data []byte) error {
            m.Refresh = &b
        default:
            iNdEx = preIndex
            skippy, err := skipEtcdserver(data[iNdEx:])
            skippy, err := skipEtcdserver(dAtA[iNdEx:])
            if err != nil {
                return err
            }
@@ -806,7 +810,7 @@ func (m *Request) Unmarshal(data []byte) error {
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
            m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
            iNdEx += skippy
        }
    }
@@ -816,8 +820,8 @@ func (m *Request) Unmarshal(data []byte) error {
    }
    return nil
}
func (m *Metadata) Unmarshal(data []byte) error {
    l := len(data)
func (m *Metadata) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
@@ -829,7 +833,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := data[iNdEx]
            b := dAtA[iNdEx]
            iNdEx++
            wire |= (uint64(b) & 0x7F) << shift
            if b < 0x80 {
@@ -857,7 +861,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.NodeID |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -876,7 +880,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                m.ClusterID |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
@@ -885,7 +889,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
            }
        default:
            iNdEx = preIndex
            skippy, err := skipEtcdserver(data[iNdEx:])
            skippy, err := skipEtcdserver(dAtA[iNdEx:])
            if err != nil {
                return err
            }
@@ -895,7 +899,7 @@ func (m *Metadata) Unmarshal(data []byte) error {
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
            m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
            iNdEx += skippy
        }
    }
@@ -905,8 +909,8 @@ func (m *Metadata) Unmarshal(data []byte) error {
    }
    return nil
}
func skipEtcdserver(data []byte) (n int, err error) {
    l := len(data)
func skipEtcdserver(dAtA []byte) (n int, err error) {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        var wire uint64
@@ -917,7 +921,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
            if iNdEx >= l {
                return 0, io.ErrUnexpectedEOF
            }
            b := data[iNdEx]
            b := dAtA[iNdEx]
            iNdEx++
            wire |= (uint64(b) & 0x7F) << shift
            if b < 0x80 {
@@ -935,7 +939,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
                    return 0, io.ErrUnexpectedEOF
                }
                iNdEx++
                if data[iNdEx-1] < 0x80 {
                if dAtA[iNdEx-1] < 0x80 {
                    break
                }
            }
@@ -952,7 +956,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
                if iNdEx >= l {
                    return 0, io.ErrUnexpectedEOF
                }
                b := data[iNdEx]
                b := dAtA[iNdEx]
                iNdEx++
                length |= (int(b) & 0x7F) << shift
                if b < 0x80 {
@@ -975,7 +979,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
                    if iNdEx >= l {
                        return 0, io.ErrUnexpectedEOF
                    }
                    b := data[iNdEx]
                    b := dAtA[iNdEx]
                    iNdEx++
                    innerWire |= (uint64(b) & 0x7F) << shift
                    if b < 0x80 {
@@ -986,7 +990,7 @@ func skipEtcdserver(data []byte) (n int, err error) {
                if innerWireType == 4 {
                    break
                }
                next, err := skipEtcdserver(data[start:])
                next, err := skipEtcdserver(dAtA[start:])
                if err != nil {
                    return 0, err
                }
@@ -1010,32 +1014,32 @@ var (
    ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }

var fileDescriptorEtcdserver = []byte{
    // 404 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31,
    0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1,
    0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5,
    0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45,
    0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb,
    0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4,
    0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35,
    0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd,
    0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7,
    0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4,
    0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c,
    0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08,
    0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27,
    0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21,
    0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91,
    0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5,
    0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd,
    0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32,
    0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56,
    0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe,
    0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d,
    0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6,
    0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f,
    0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f,
    0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40,
    0xa4, 0x02, 0x00, 0x00,
    // 380 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
    0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
    0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
    0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
    0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
    0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
    0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
    0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
    0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
    0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
    0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
    0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
    0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
    0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
    0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
    0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
    0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
    0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
    0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
    0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
    0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
    0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
    0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
    0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
}
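The encodeVarintEtcdserver helper above (only its parameter was renamed in this diff) writes protobuf base-128 varints: seven payload bits per byte, with the 0x80 continuation bit set on every byte except the last. The standard library's binary.PutUvarint emits the same wire format, which this standalone sketch uses to cross-check a hand-rolled copy of the generated loop:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// encodeVarint is the same loop as the generated encodeVarintEtcdserver:
// emit 7 bits at a time, setting 0x80 on every byte but the last.
func encodeVarint(buf []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        buf[offset] = uint8(v&0x7f | 0x80)
        v >>= 7
        offset++
    }
    buf[offset] = uint8(v)
    return offset + 1
}

func main() {
    a := make([]byte, binary.MaxVarintLen64)
    n := encodeVarint(a, 0, 300)

    b := make([]byte, binary.MaxVarintLen64)
    m := binary.PutUvarint(b, 300)

    // 300 encodes as [0xAC 0x02] under both implementations.
    fmt.Println(bytes.Equal(a[:n], b[:m]), a[:n]) // true [172 2]
}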
608
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
File diff suppressed because it is too large
2
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
generated
vendored
@@ -14,6 +14,8 @@ message RequestHeader {
    uint64 ID = 1;
    // username is a username that is associated with an auth token of gRPC connection
    string username = 2;
    // auth_revision is a revision number of auth.authStore. It is not related to mvcc
    uint64 auth_revision = 3;
}

// An InternalRaftRequest is the union of all requests which can be
5435
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
File diff suppressed because it is too large
45
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go
generated
vendored
@@ -222,6 +222,19 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh
    return stream, metadata, nil
}

func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq LeaseTimeToLiveRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq MemberAddRequest
    var metadata runtime.ServerMetadata
@@ -935,6 +948,34 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc

    })

    mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, req)
        if err != nil {
            runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
        }
        resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
            return
        }

        forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    return nil
}

@@ -944,6 +985,8 @@ var (
    pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, ""))

    pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, ""))

    pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, ""))
)

var (
@@ -952,6 +995,8 @@ var (
    forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage

    forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream

    forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
)

// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
60
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
generated
vendored
@@ -104,8 +104,15 @@ service Lease {
        };
    }

    // LeaseTimeToLive retrieves lease information.
    rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
        option (google.api.http) = {
            post: "/v3alpha/kv/lease/timetolive"
            body: "*"
        };
    }

    // TODO(xiangli) List all existing Leases?
    // TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
}

service Cluster {
@@ -375,6 +382,22 @@ message RangeRequest {

    // count_only when set returns only the count of the keys in the range.
    bool count_only = 9;

    // min_mod_revision is the lower bound for returned key mod revisions; all keys with
    // lesser mod revisions will be filtered away.
    int64 min_mod_revision = 10;

    // max_mod_revision is the upper bound for returned key mod revisions; all keys with
    // greater mod revisions will be filtered away.
    int64 max_mod_revision = 11;

    // min_create_revision is the lower bound for returned key create revisions; all keys with
    // lesser create revisions will be filtered away.
    int64 min_create_revision = 12;

    // max_create_revision is the upper bound for returned key create revisions; all keys with
    // greater create revisions will be filtered away.
    int64 max_create_revision = 13;
}

message RangeResponse {
@@ -413,8 +436,11 @@ message DeleteRangeRequest {
    bytes key = 1;
    // range_end is the key following the last key to delete for the range [key, range_end).
    // If range_end is not given, the range is defined to contain only the key argument.
    // If range_end is one bit larger than the given key, then the range is all
    // the keys with the prefix (the given key).
    // If range_end is '\0', the range is all keys greater than or equal to the key argument.
    bytes range_end = 2;

    // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
    // The previous key-value pairs will be returned in the delete response.
    bool prev_kv = 3;
@@ -451,6 +477,7 @@ message Compare {
        EQUAL = 0;
        GREATER = 1;
        LESS = 2;
        NOT_EQUAL = 3;
    }
    enum CompareTarget {
        VERSION = 0;
@@ -566,6 +593,8 @@ message WatchCreateRequest {
    // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
    // only the key argument is watched. If range_end is equal to '\0', all keys greater than
    // or equal to the key argument are watched.
    // If the range_end is one bit larger than the given key,
    // then all keys with the prefix (the given key) will be watched.
    bytes range_end = 2;
    // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
    int64 start_revision = 3;
@@ -574,6 +603,16 @@ message WatchCreateRequest {
    // wish to recover a disconnected watcher starting from a recent known revision.
    // The etcd server may decide how often it will send notifications based on current load.
    bool progress_notify = 4;

    enum FilterType {
        // filter out put event.
        NOPUT = 0;
        // filter out delete event.
        NODELETE = 1;
    }
    // filters filter the events at server side before it sends back to the watcher.
    repeated FilterType filters = 5;

    // If prev_kv is set, created watcher gets the previous KV before the event happens.
    // If the previous KV is already compacted, nothing will be returned.
    bool prev_kv = 6;
@@ -647,6 +686,25 @@ message LeaseKeepAliveResponse {
    int64 TTL = 3;
}

message LeaseTimeToLiveRequest {
    // ID is the lease ID for the lease.
    int64 ID = 1;
    // keys is true to query all the keys attached to this lease.
    bool keys = 2;
}

message LeaseTimeToLiveResponse {
    ResponseHeader header = 1;
    // ID is the lease ID from the keep alive request.
    int64 ID = 2;
    // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
    int64 TTL = 3;
    // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
    int64 grantedTTL = 4;
    // Keys is the list of keys attached to this lease.
    repeated bytes keys = 5;
}

message Member {
    // ID is the member ID for this member.
    uint64 ID = 1;
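On the client side, the new LeaseTimeToLive RPC surfaces in the v3 Go client as Lease.TimeToLive. A hedged usage sketch, assuming the clientv3 package shipped with this etcd version and an endpoint at localhost:2379; error handling is trimmed for brevity:

package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"localhost:2379"},
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    ctx := context.Background()
    lease, _ := cli.Grant(ctx, 60)                       // 60s lease
    cli.Put(ctx, "k", "v", clientv3.WithLease(lease.ID)) // attach a key

    // keys=true in LeaseTimeToLiveRequest corresponds to WithAttachedKeys.
    resp, _ := cli.TimeToLive(ctx, lease.ID, clientv3.WithAttachedKeys())
    fmt.Printf("TTL=%ds granted=%ds keys=%q\n", resp.TTL, resp.GrantedTTL, resp.Keys)
}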
7
vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
generated
vendored
@@ -24,6 +24,9 @@ import (
    "sort"
    "strings"
    "sync"
    "time"

    "golang.org/x/net/context"

    "github.com/coreos/etcd/mvcc/backend"
    "github.com/coreos/etcd/pkg/netutil"
@@ -484,8 +487,10 @@ func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) erro
    sort.Sort(MembersByPeerURLs(ems))
    sort.Sort(MembersByPeerURLs(lms))

    ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
    defer cancel()
    for i := range ems {
        if !netutil.URLStringsEqual(ems[i].PeerURLs, lms[i].PeerURLs) {
        if !netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs) {
            return fmt.Errorf("unmatched member while checking PeerURLs")
        }
        lms[i].ID = ems[i].ID
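URLStringsEqual now takes a context because comparing peer URLs may resolve hostnames; the 30-second context.WithTimeout above bounds that DNS work so cluster validation cannot hang indefinitely. The idiom in isolation, with a placeholder slowCompare standing in for the DNS-backed comparison:

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// slowCompare stands in for a comparison that may block on DNS lookups.
func slowCompare(ctx context.Context) (bool, error) {
    done := make(chan bool)
    go func() { time.Sleep(5 * time.Second); done <- true }()
    select {
    case eq := <-done:
        return eq, nil
    case <-ctx.Done():
        return false, ctx.Err() // deadline exceeded: treat as unequal
    }
}

func main() {
    // The diff uses a 30s deadline; 1s here keeps the demo quick.
    ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Second)
    defer cancel()
    fmt.Println(slowCompare(ctx)) // false context deadline exceeded
}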
2
vendor/github.com/coreos/etcd/etcdserver/membership/member.go
generated
vendored
@@ -27,7 +27,7 @@ import (
)

var (
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "membership")
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
)

// RaftAttributes represents the raft related attributes of an etcd member.
2
vendor/github.com/coreos/etcd/etcdserver/metrics.go
generated
vendored
@@ -52,7 +52,7 @@ var (
        Name:      "proposals_pending",
        Help:      "The current number of pending proposals to commit.",
    })
    proposalsFailed = prometheus.NewGauge(prometheus.GaugeOpts{
    proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "proposals_failed_total",
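proposals_failed_total only ever grows, so a prometheus Counter (which exposes just Inc and Add with non-negative deltas) is the right type; a Gauge would wrongly advertise that the value can go down, and Prometheus rate() queries rely on the counter contract. A minimal sketch with the client_golang API:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

var proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
    Namespace: "etcd",
    Subsystem: "server",
    Name:      "proposals_failed_total",
    Help:      "The total number of failed proposals seen.",
})

func main() {
    prometheus.MustRegister(proposalsFailed)

    // Counters have no Dec or Set, which is exactly the contract a
    // *_total metric should have.
    proposalsFailed.Inc()
    proposalsFailed.Add(2)
    fmt.Println("recorded 3 failed proposals")
}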
132
vendor/github.com/coreos/etcd/etcdserver/raft.go
generated
vendored
@@ -98,18 +98,25 @@ type raftNode struct {
|
||||
// last lead elected time
|
||||
lt time.Time
|
||||
|
||||
// to check if msg receiver is removed from cluster
|
||||
isIDRemoved func(id uint64) bool
|
||||
|
||||
raft.Node
|
||||
|
||||
// a chan to send/receive snapshot
|
||||
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// TODO: remove the etcdserver related logic from raftNode
	// TODO: add a state machine interface to apply the commit entries
	// and do snapshot/recover
	s *EtcdServer
	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker <-chan time.Time
	ticker <-chan time.Time
	// contention detectors for raft heartbeat message
	td          *contention.TimeoutDetector
	heartbeat   time.Duration // for logging
	raftStorage *raft.MemoryStorage
	storage     Storage
	// transport specifies the transport to send and receive msgs to members.
@@ -118,32 +125,19 @@ type raftNode struct {
	// If transport is nil, server will panic.
	transport rafthttp.Transporter

	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
// TODO: Ideally raftNode should get rid of the passed in server structure.
func (r *raftNode) start(s *EtcdServer) {
	r.s = s
func (r *raftNode) start(rh *raftReadyHandler) {
	r.applyc = make(chan apply)
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})

	heartbeat := 200 * time.Millisecond
	if s.Cfg != nil {
		heartbeat = time.Duration(s.Cfg.TickMs) * time.Millisecond
	}
	// set up contention detectors for raft heartbeat message.
	// expect to send a heartbeat within 2 heartbeat intervals.
	r.td = contention.NewTimeoutDetector(2 * heartbeat)
	internalTimeout := time.Second

	go func() {
		var syncC <-chan time.Time

		defer r.onStop()
		islead := false

@@ -167,32 +161,17 @@ func (r *raftNode) start(s *EtcdServer) {
			}

			atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
			if rd.RaftState == raft.StateLeader {
				islead = true
				// TODO: raft should send server a notification through chan when
				// it promotes or demotes instead of modifying server directly.
				syncC = r.s.SyncTicker
				if r.s.lessor != nil {
					r.s.lessor.Promote(r.s.Cfg.electionTimeout())
				}
				// TODO: remove the nil checking
				// current test utility does not provide the stats
				if r.s.stats != nil {
					r.s.stats.BecomeLeader()
				}
				if r.s.compactor != nil {
					r.s.compactor.Resume()
				}
				r.td.Reset()
			} else {
				islead = false
				if r.s.lessor != nil {
					r.s.lessor.Demote()
				}
				if r.s.compactor != nil {
					r.s.compactor.Pause()
				}
				syncC = nil
				islead = rd.RaftState == raft.StateLeader
				rh.updateLeadership()
			}

			if len(rd.ReadStates) != 0 {
				select {
				case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
				case <-time.After(internalTimeout):
					plog.Warningf("timed out sending read state")
				case <-r.stopped:
					return
				}
			}

@@ -203,6 +182,8 @@ func (r *raftNode) start(s *EtcdServer) {
				raftDone: raftDone,
			}

			updateCommittedIndex(&ap, rh)

			select {
			case r.applyc <- ap:
			case <-r.stopped:
@@ -214,7 +195,7 @@ func (r *raftNode) start(s *EtcdServer) {
			// For more details, check raft thesis 10.2.1
			if islead {
				// gofail: var raftBeforeLeaderSend struct{}
				r.s.send(rd.Messages)
				r.sendMessages(rd.Messages)
			}

			// gofail: var raftBeforeSave struct{}
@@ -241,12 +222,10 @@ func (r *raftNode) start(s *EtcdServer) {

			if !islead {
				// gofail: var raftBeforeFollowerSend struct{}
				r.s.send(rd.Messages)
				r.sendMessages(rd.Messages)
			}
			raftDone <- struct{}{}
			r.Advance()
		case <-syncC:
			r.s.sync(r.s.Cfg.ReqTimeout())
		case <-r.stopped:
			return
		}
@@ -254,6 +233,59 @@ func (r *raftNode) start(s *EtcdServer) {
	}()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}
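A note on the helper above: the committed index reported to the server is the larger of the last committed entry's index and the incoming snapshot's index, with zero meaning "nothing to report". A minimal standalone sketch of that selection logic, with hypothetical local types standing in for etcd's apply and raftReadyHandler:

package main

import "fmt"

// entry and snapshot mirror just the fields the selection logic reads;
// they are illustrative stand-ins, not etcd's real types.
type entry struct{ Index uint64 }
type snapshot struct{ Index uint64 }

// committedIndex returns the index that should be reported, or 0 if
// there is nothing new: the max of the last entry index and the
// snapshot index.
func committedIndex(entries []entry, snap snapshot) uint64 {
	var ci uint64
	if len(entries) != 0 {
		ci = entries[len(entries)-1].Index
	}
	if snap.Index > ci {
		ci = snap.Index
	}
	return ci
}

func main() {
	fmt.Println(committedIndex([]entry{{Index: 7}, {Index: 9}}, snapshot{Index: 5})) // 9
	fmt.Println(committedIndex(nil, snapshot{Index: 12}))                            // 12
	fmt.Println(committedIndex(nil, snapshot{}))                                     // 0, nothing to report
}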

func (r *raftNode) sendMessages(ms []raftpb.Message) {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store, without the KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}

	r.transport.Send(ms)
}
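The loop above walks the batch from newest to oldest so that only the most recent MsgAppResp survives; append responses carry a cumulative index, so older ones are redundant. A self-contained sketch of that deduplication, using a hypothetical message type rather than raftpb:

package main

import "fmt"

// msg is an illustrative stand-in for raftpb.Message; To == 0 is the
// convention raft uses for "do not send".
type msg struct {
	Type string
	To   uint64
}

// dedupAppResp keeps only the newest append response, mirroring the
// sentAppResp bookkeeping in sendMessages above.
func dedupAppResp(ms []msg) []msg {
	sent := false
	for i := len(ms) - 1; i >= 0; i-- {
		if ms[i].Type != "MsgAppResp" {
			continue
		}
		if sent {
			ms[i].To = 0 // drop the older, redundant response
		} else {
			sent = true
		}
	}
	return ms
}

func main() {
	out := dedupAppResp([]msg{
		{Type: "MsgAppResp", To: 2},
		{Type: "MsgHeartbeat", To: 2},
		{Type: "MsgAppResp", To: 2},
	})
	fmt.Println(out) // only the last MsgAppResp keeps To == 2
}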

func (r *raftNode) apply() chan apply {
	return r.applyc
}

530
vendor/github.com/coreos/etcd/etcdserver/server.go
generated
vendored
@@ -23,6 +23,7 @@ import (
	"net/http"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"sync"
	"sync/atomic"
@@ -40,6 +41,7 @@ import (
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/idutil"
	"github.com/coreos/etcd/pkg/pbutil"
@@ -65,6 +67,10 @@ const (
	StoreClusterPrefix = "/0"
	StoreKeysPrefix    = "/1"

	// HealthInterval is the minimum time the cluster should be healthy
	// before accepting add member requests.
	HealthInterval = 5 * time.Second

	purgeFileInterval = 30 * time.Second
	// monitorVersionInterval should be smaller than the timeout
	// on the connection. Otherwise we will not be able to reuse the connection
@@ -77,6 +83,9 @@ const (
	maxInFlightMsgSnap = 16

	releaseDelayAfterSnapshot = 30 * time.Second

	// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
	maxPendingRevokes = 16
)

var (
@@ -158,6 +167,7 @@ type EtcdServer struct {
	// inflightSnapshots holds the number of snapshots currently in flight.
	inflightSnapshots int64  // must use atomic operations to access; keep 64-bit aligned.
	appliedIndex      uint64 // must use atomic operations to access; keep 64-bit aligned.
	committedIndex    uint64 // must use atomic operations to access; keep 64-bit aligned.
	// consistIndex is used to hold the offset of the current executing entry.
	// It is initialized to 0 before executing any entry.
	consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
@@ -168,9 +178,23 @@ type EtcdServer struct {

	snapCount uint64

	w    wait.Wait
	stop chan struct{}
	done chan struct{}
	w wait.Wait

	readMu sync.RWMutex
	// read routine notifies etcd server that it waits for reading by sending an empty struct to
	// readwaitC
	readwaitc chan struct{}
	// readNotifier is used to notify the read routine that it can process the request
	// when there is no error
	readNotifier *notifier

	// stop signals the run goroutine should shutdown.
	stop chan struct{}
	// stopping is closed by run goroutine on shutdown.
	stopping chan struct{}
	// done is closed when all goroutines from start() complete.
	done chan struct{}

	errorc     chan error
	id         types.ID
	attributes membership.Attributes
@@ -181,7 +205,12 @@ type EtcdServer struct {

	applyV2 ApplierV2

	applyV3 applierV3
	// applyV3 is the applier with auth and quotas
	applyV3 applierV3
	// applyV3Base is the core applier without auth or quotas
	applyV3Base applierV3
	applyWait   wait.WaitTime

	kv     mvcc.ConsistentWatchableKV
	lessor lease.Lessor
	bemu   sync.Mutex
@@ -204,8 +233,8 @@ type EtcdServer struct {
	// to detect the cluster version immediately.
	forceVersionC chan struct{}

	msgSnapC chan raftpb.Message

	// wgMu blocks concurrent waitgroup mutation while server stopping
	wgMu sync.RWMutex
	// wg is used to wait for the goroutines that depend on the server state
	// to exit when stopping the server.
	wg sync.WaitGroup
@@ -228,15 +257,6 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err = upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
@@ -244,9 +264,24 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	}
	ss := snap.New(cfg.SnapDir())

	bepath := path.Join(cfg.SnapDir(), databaseFilename)
	bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
	beExist := fileutil.Exist(bepath)
	be := backend.NewDefaultBackend(bepath)

	var be backend.Backend
	beOpened := make(chan struct{})
	go func() {
		be = backend.NewDefaultBackend(bepath)
		beOpened <- struct{}{}
	}()

	select {
	case <-beOpened:
	case <-time.After(time.Second):
		plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
		plog.Warningf("waiting for it to exit before starting...")
		<-beOpened
	}
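The select above implements a "warn, then keep waiting" pattern: opening the bolt backend blocks while another process holds the file lock, so the open runs in a goroutine and the main path logs a hint after one second instead of failing outright. A generic sketch of the pattern, with a hypothetical openFn standing in for backend.NewDefaultBackend:

package main

import (
	"log"
	"time"
)

// openWithWarning runs openFn in a goroutine and, if it has not
// returned within warnAfter, logs a hint and then keeps waiting
// indefinitely rather than giving up.
func openWithWarning(openFn func() string, warnAfter time.Duration) string {
	done := make(chan string, 1)
	go func() { done <- openFn() }()

	select {
	case v := <-done:
		return v
	case <-time.After(warnAfter):
		log.Println("open is taking a while; another process may hold the lock, waiting...")
		return <-done
	}
}

func main() {
	slowOpen := func() string {
		time.Sleep(2 * time.Second) // simulate a held file lock
		return "backend"
	}
	log.Println("opened:", openWithWarning(slowOpen, time.Second))
}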

	defer func() {
		if err != nil {
			be.Close()
@@ -372,6 +407,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
	srv = &EtcdServer{
		readych: make(chan struct{}),
		Cfg:     cfg,
@@ -379,10 +415,17 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
		errorc:  make(chan error, 1),
		store:   st,
		r: raftNode{
			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			ticker:      time.Tick(heartbeat),
			// set up contention detectors for raft heartbeat message.
			// expect to send a heartbeat within 2 heartbeat intervals.
			td:          contention.NewTimeoutDetector(2 * heartbeat),
			heartbeat:   heartbeat,
			raftStorage: s,
			storage:     NewStorage(w, ss),
			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
			readStateC:  make(chan raft.ReadState, 1),
		},
		id:         id,
		attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
@@ -393,15 +436,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
		peerRt:        prt,
		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC: make(chan struct{}),
		msgSnapC:      make(chan raftpb.Message, maxInFlightMsgSnap),
	}

	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

	srv.be = be
	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * time.Duration(cfg.TickMs) * time.Millisecond
	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat

	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
	if beExist {
		kvindex := srv.kv.ConsistentIndex()
@@ -416,12 +460,16 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	}
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())

	srv.authStore = auth.NewAuthStore(srv.be)
	srv.authStore = auth.NewAuthStore(srv.be,
		func(index uint64) <-chan struct{} {
			return srv.applyWait.Wait(index)
		})
	if h := cfg.AutoCompactionRetention; h != 0 {
		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
		srv.compactor.Run()
	}

	srv.applyV3Base = &applierV3backend{srv}
	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}
@@ -463,10 +511,11 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(s.Cfg.ReqTimeout())
	go s.purgeFile()
	go monitorFileDescriptor(s.done)
	go s.monitorVersions()
	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
	s.goAttach(s.purgeFile)
	s.goAttach(func() { monitorFileDescriptor(s.stopping) })
	s.goAttach(s.monitorVersions)
	s.goAttach(s.linearizableReadLoop)
}

// start prepares and starts server in a new goroutine. It is no longer safe to
@@ -478,8 +527,12 @@ func (s *EtcdServer) start() {
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.applyWait = wait.NewTimeList()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stopping = make(chan struct{})
	s.readwaitc = make(chan struct{}, 1)
	s.readNotifier = newNotifier()
	if s.ClusterVersion() != nil {
		plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
	} else {
@@ -503,7 +556,7 @@ func (s *EtcdServer) purgeFile() {
		plog.Fatalf("failed to purge wal file %v", e)
	case e := <-serrc:
		plog.Fatalf("failed to purge snap file %v", e)
	case <-s.done:
	case <-s.stopping:
		return
	}
}
@@ -516,6 +569,8 @@ func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler()

func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }

func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.cluster.IsIDRemoved(types.ID(m.From)) {
		plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
@@ -540,28 +595,91 @@ func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
type etcdProgress struct {
	confState raftpb.ConfState
	snapi     uint64
	appliedt  uint64
	appliedi  uint64
}

// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
	updateLeadership     func()
	updateCommittedIndex func(uint64)
}

func (s *EtcdServer) run() {
	snap, err := s.r.raftStorage.Snapshot()
	if err != nil {
		plog.Panicf("get snapshot from raft storage error: %v", err)
	}
	s.r.start(s)

	var (
		smu   sync.RWMutex
		syncC <-chan time.Time
	)
	setSyncC := func(ch <-chan time.Time) {
		smu.Lock()
		syncC = ch
		smu.Unlock()
	}
	getSyncC := func() (ch <-chan time.Time) {
		smu.RLock()
		ch = syncC
		smu.RUnlock()
		return
	}
	rh := &raftReadyHandler{
		updateLeadership: func() {
			if !s.isLeader() {
				if s.lessor != nil {
					s.lessor.Demote()
				}
				if s.compactor != nil {
					s.compactor.Pause()
				}
				setSyncC(nil)
			} else {
				setSyncC(s.SyncTicker)
				if s.compactor != nil {
					s.compactor.Resume()
				}
			}

			// TODO: remove the nil checking
			// current test utility does not provide the stats
			if s.stats != nil {
				s.stats.BecomeLeader()
			}
			if s.r.td != nil {
				s.r.td.Reset()
			}
		},
		updateCommittedIndex: func(ci uint64) {
			cci := s.getCommittedIndex()
			if ci > cci {
				s.setCommittedIndex(ci)
			}
		},
	}
	s.r.start(rh)

	// asynchronously accept apply packets, dispatch progress in-order
	sched := schedule.NewFIFOScheduler()
	ep := etcdProgress{
		confState: snap.Metadata.ConfState,
		snapi:     snap.Metadata.Index,
		appliedt:  snap.Metadata.Term,
		appliedi:  snap.Metadata.Index,
	}

	defer func() {
		s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
		close(s.stopping)
		s.wgMu.Unlock()

		sched.Stop()

		// wait for snapshots before closing raft so wal stays open
		// wait for goroutines before closing raft so wal stays open
		s.wg.Wait()

		// must stop raft after scheduler; etcdserver can leak rafthttp pipelines otherwise
@@ -576,6 +694,9 @@ func (s *EtcdServer) run() {
		if s.kv != nil {
			s.kv.Close()
		}
		if s.authStore != nil {
			s.authStore.Close()
		}
		if s.be != nil {
			s.be.Close()
		}
@@ -596,15 +717,30 @@ func (s *EtcdServer) run() {
			f := func(context.Context) { s.applyAll(&ep, &ap) }
			sched.Schedule(f)
		case leases := <-expiredLeaseC:
			go func() {
				for _, l := range leases {
					s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(l.ID)})
			s.goAttach(func() {
				// Increase throughput of the expired-lease deletion process through parallelization
				c := make(chan struct{}, maxPendingRevokes)
				for _, lease := range leases {
					select {
					case c <- struct{}{}:
					case <-s.stopping:
						return
					}
					lid := lease.ID
					s.goAttach(func() {
						s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)})
						<-c
					})
				}
			}()
			})
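The revoke path above bounds its fan-out with a buffered channel used as a counting semaphore: at most maxPendingRevokes revocations are in flight at once, and the loop also bails out if the server starts stopping. A self-contained sketch of the same bounded-parallelism shape (the stopping channel and revoke function here are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// revokeAll runs revoke for every id, allowing at most limit calls to
// run concurrently; a closed stopping channel aborts scheduling.
func revokeAll(ids []int64, limit int, stopping <-chan struct{}, revoke func(int64)) {
	sem := make(chan struct{}, limit) // counting semaphore
	var wg sync.WaitGroup
	for _, id := range ids {
		select {
		case sem <- struct{}{}: // acquire a slot
		case <-stopping:
			wg.Wait()
			return
		}
		wg.Add(1)
		id := id // capture the per-iteration value
		go func() {
			defer wg.Done()
			revoke(id)
			<-sem // release the slot
		}()
	}
	wg.Wait()
}

func main() {
	stopping := make(chan struct{})
	revokeAll([]int64{1, 2, 3, 4, 5}, 2, stopping, func(id int64) {
		fmt.Println("revoked lease", id)
	})
}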
		case err := <-s.errorc:
			plog.Errorf("%s", err)
			plog.Infof("the data-dir used by this member must be removed.")
			return
		case <-getSyncC():
			if s.store.HasTTLKeys() {
				s.sync(s.Cfg.ReqTimeout())
			}
		case <-s.stop:
			return
		}
@@ -622,15 +758,17 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
		plog.Warningf("avoid queries with large range/delete range!")
	}
	proposalsApplied.Set(float64(ep.appliedi))
	s.applyWait.Trigger(ep.appliedi)
	// wait for the raft routine to finish the disk writes before triggering a
	// snapshot. Otherwise the applied index might be greater than the last index in raft
	// storage, since the raft routine might be slower than the apply routine.
	<-apply.raftDone

	s.triggerSnapshot(ep)
	select {
	// snapshot requested via send()
	case m := <-s.msgSnapC:
		merged := s.createMergedSnapshotMessage(m, ep.appliedi, ep.confState)
	case m := <-s.r.msgSnapC:
		merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
		s.sendMergedSnap(merged)
	default:
	}
@@ -654,7 +792,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
		plog.Panicf("get database snapshot file path error: %v", err)
	}

	fn := path.Join(s.Cfg.SnapDir(), databaseFilename)
	fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
	if err := os.Rename(snapfn, fn); err != nil {
		plog.Panicf("rename snapshot file error: %v", err)
	}
@@ -732,6 +870,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
	}
	plog.Info("finished adding peers from new cluster configuration into network...")

	ep.appliedt = apply.snapshot.Metadata.Term
	ep.appliedi = apply.snapshot.Metadata.Index
	ep.snapi = ep.appliedi
	ep.confState = apply.snapshot.Metadata.ConfState
@@ -753,7 +892,7 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
		return
	}
	var shouldstop bool
	if ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
	if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
		go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
	}
}
@@ -768,9 +907,62 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
	ep.snapi = ep.appliedi
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
func (s *EtcdServer) Stop() {
func (s *EtcdServer) isMultiNode() bool {
	return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
}

func (s *EtcdServer) isLeader() bool {
	return uint64(s.ID()) == s.Lead()
}

// transferLeadership transfers the leader to the given transferee.
// TODO: maybe expose to client?
func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error {
	now := time.Now()
	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

	plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
	s.r.TransferLeadership(ctx, lead, transferee)
	for s.Lead() != transferee {
		select {
		case <-ctx.Done(): // time out
			return ErrTimeoutLeaderTransfer
		case <-time.After(interval):
		}
	}

	// TODO: drain all requests, or drop all messages to the old leader

	plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
	return nil
}
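transferLeadership asks raft to move the lease and then simply polls Lead() once per tick until the transferee shows up or the context expires. That "poll until condition or deadline" shape is easy to reuse; a generic sketch with a hypothetical cond function standing in for the leadership check:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timed out waiting for condition")

// waitFor polls cond every interval until it returns true or ctx is done.
func waitFor(ctx context.Context, interval time.Duration, cond func() bool) error {
	for !cond() {
		select {
		case <-ctx.Done():
			return errTimeout
		case <-time.After(interval):
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	deadline := time.Now().Add(300 * time.Millisecond)
	err := waitFor(ctx, 50*time.Millisecond, func() bool {
		return time.Now().After(deadline) // stands in for s.Lead() == transferee
	})
	fmt.Println(err) // <nil>: the "leader" changed before the context expired
}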

// TransferLeadership transfers the leader to the chosen transferee.
func (s *EtcdServer) TransferLeadership() error {
	if !s.isLeader() {
		plog.Printf("skipped leadership transfer for stopping non-leader member")
		return nil
	}

	if !s.isMultiNode() {
		plog.Printf("skipped leadership transfer for single member cluster")
		return nil
	}

	transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
	if !ok {
		return ErrUnhealthy
	}

	tm := s.Cfg.ReqTimeout()
	ctx, cancel := context.WithTimeout(context.TODO(), tm)
	err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
	cancel()
	return err
}

// HardStop stops the server without coordination with other members in the cluster.
func (s *EtcdServer) HardStop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
@@ -779,6 +971,17 @@ func (s *EtcdServer) Stop() {
	<-s.done
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
// When stopping leader, Stop transfers its leadership to one of its peers
// before stopping the server.
func (s *EtcdServer) Stop() {
	if err := s.TransferLeadership(); err != nil {
		plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
	}
	s.HardStop()
}

// ReadyNotify returns a channel that will be closed when the server
// is ready to serve client requests
func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
@@ -810,11 +1013,42 @@ func (s *EtcdServer) LeaderStats() []byte {

func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
	if s.authStore == nil {
		// In the context of an ordinary etcd process, s.authStore will never be nil.
		// This branch is for handling cases in server_test.go
		return nil
	}

	// Note that this permission check is done in the API layer,
	// so a TOCTOU problem could potentially occur with a schedule like this:
	// update membership with user A -> revoke root role of A -> apply membership change
	// in the state machine layer
	// However, both membership changes and role management require the root privilege.
	// So careful operation by admins can prevent the problem.
	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
	if err != nil {
		return err
	}

	return s.AuthStore().IsAdminPermitted(authInfo)
}

func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error {
	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToAddNewMember() {
		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
		// In such a case adding a new member is allowed unconditionally
		return ErrNotEnoughStartedMembers
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return err
	}

	if s.Cfg.StrictReconfigCheck {
		// by default StrictReconfigCheck is enabled; reject new members if unhealthy
		if !s.cluster.IsReadyToAddNewMember() {
			plog.Warningf("not enough started members, rejecting member add %+v", memb)
			return ErrNotEnoughStartedMembers
		}
		if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
			plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
			return ErrUnhealthy
		}
	}

	// TODO: move Member to protobuf type
@@ -831,10 +1065,13 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToRemoveMember(id) {
		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
		// In such a case removing a member is allowed unconditionally
		return ErrNotEnoughStartedMembers
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return err
	}

	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
	if err := s.mayRemoveMember(types.ID(id)); err != nil {
		return err
	}

	cc := raftpb.ConfChange{
@@ -844,9 +1081,39 @@ func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	return s.configure(ctx, cc)
}

func (s *EtcdServer) mayRemoveMember(id types.ID) error {
	if !s.Cfg.StrictReconfigCheck {
		return nil
	}

	if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
		plog.Warningf("not enough started members, rejecting remove member %s", id)
		return ErrNotEnoughStartedMembers
	}

	// downed member is safe to remove since it's not part of the active quorum
	if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
		return nil
	}

	// protect quorum if some members are down
	m := s.cluster.Members()
	active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
	if (active - 1) < 1+((len(m)-1)/2) {
		plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
		return ErrUnhealthy
	}

	return nil
}
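The quorum guard above is worth a worked example: with len(m) members, quorum is 1+((len(m)-1)/2), and removal is rejected when the actives that would remain, active - 1, fall below it. A small sketch of the check over a few sample cluster sizes:

package main

import "fmt"

// safeToRemove reports whether removing one connected member still
// leaves enough active members for quorum, mirroring mayRemoveMember.
func safeToRemove(members, active int) bool {
	quorum := 1 + (members-1)/2
	return active-1 >= quorum
}

func main() {
	fmt.Println(safeToRemove(5, 5)) // true: 4 remaining >= quorum of 3
	fmt.Println(safeToRemove(5, 4)) // true: 3 remaining >= 3
	fmt.Println(safeToRemove(5, 3)) // false: 2 remaining < 3, reject
	fmt.Println(safeToRemove(3, 2)) // false: 1 remaining < 2, reject
}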

func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error {
	b, err := json.Marshal(memb)
	if err != nil {
	b, merr := json.Marshal(memb)
	if merr != nil {
		return merr
	}

	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return err
	}
	cc := raftpb.ConfChange{
@@ -870,8 +1137,6 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }

func (s *EtcdServer) IsPprofEnabled() bool { return s.Cfg.EnablePprof }

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
@@ -895,7 +1160,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return s.parseProposeCtxErr(ctx.Err(), start)
	case <-s.done:
	case <-s.stopping:
		return ErrStopped
	}
}
@@ -913,10 +1178,10 @@ func (s *EtcdServer) sync(timeout time.Duration) {
	data := pbutil.MustMarshal(&req)
	// There is no promise that the node has a leader when doing a SYNC request,
	// so it uses a goroutine to propose.
	go func() {
	s.goAttach(func() {
		s.r.Propose(ctx, data)
		cancel()
	}()
	})
}

// publish registers server information into the cluster. The information
@@ -954,52 +1219,11 @@ func (s *EtcdServer) publish(timeout time.Duration) {
	}
}

// TODO: move this function into raft.go
func (s *EtcdServer) send(ms []raftpb.Message) {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if s.cluster.IsIDRemoved(types.ID(ms[i].To)) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store, without the KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case s.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := s.r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %dms timeout for %v)", s.Cfg.TickMs, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}

	s.r.transport.Send(ms)
}

func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
	atomic.AddInt64(&s.inflightSnapshots, 1)

	s.r.transport.SendSnapshot(merged)
	go func() {
	s.goAttach(func() {
		select {
		case ok := <-merged.CloseNotify():
			// delay releasing inflight snapshot for another 30 seconds to
@@ -1009,22 +1233,20 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
			if ok {
				select {
				case <-time.After(releaseDelayAfterSnapshot):
				case <-s.done:
				case <-s.stopping:
				}
			}
			atomic.AddInt64(&s.inflightSnapshots, -1)
		case <-s.done:
		case <-s.stopping:
			return
		}
	}()
	})
}

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	var shouldstop bool
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
	for i := range es {
		e := es[i]
		switch e.Type {
@@ -1034,16 +1256,17 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			removedSelf, err := s.applyConfChange(cc, confState)
			shouldstop = shouldstop || removedSelf
			shouldStop = shouldStop || removedSelf
			s.w.Trigger(cc.ID, err)
		default:
			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.r.index, e.Index)
		atomic.StoreUint64(&s.r.term, e.Term)
		applied = e.Index
		appliedt = e.Term
		appliedi = e.Index
	}
	return applied, shouldstop
	return appliedt, appliedi, shouldStop
}

// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
@@ -1054,6 +1277,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
		s.consistIndex.setConsistentIndex(e.Index)
		shouldApplyV3 = true
	}
	defer s.setAppliedIndex(e.Index)

	// raft state machine may generate a noop entry upon leader confirmation.
	// skip it in advance to avoid some potential bug in the future
@@ -1062,6 +1286,11 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
		case s.forceVersionC <- struct{}{}:
		default:
		}
		// promote lessor when the local member is leader and finished
		// applying all entries from the last term.
		if s.isLeader() {
			s.lessor.Promote(s.Cfg.electionTimeout())
		}
		return
	}
@@ -1088,15 +1317,26 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
		id = raftReq.Header.ID
	}

	ar := s.applyV3.Apply(&raftReq)
	s.setAppliedIndex(e.Index)
	var ar *applyResult
	needResult := s.w.IsRegistered(id)
	if needResult || !noSideEffect(&raftReq) {
		if !needResult && raftReq.Txn != nil {
			removeNeedlessRangeReqs(raftReq.Txn)
		}
		ar = s.applyV3.Apply(&raftReq)
	}

	if ar == nil {
		return
	}

	if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.w.Trigger(id, ar)
		return
	}

	plog.Errorf("applying raft message exceeded backend quota")
	go func() {
	s.goAttach(func() {
		a := &pb.AlarmRequest{
			MemberID: uint64(s.ID()),
			Action:   pb.AlarmRequest_ACTIVATE,
@@ -1105,7 +1345,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
		r := pb.InternalRaftRequest{Alarm: a}
		s.processInternalRaftRequest(context.TODO(), r)
		s.w.Trigger(id, ar)
	}()
	})
}

// applyConfChange applies a ConfChange to the server. It is only
@@ -1156,11 +1396,15 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con
// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
	clone := s.store.Clone()
	// commit kv to write metadata (for example: consistent index) to disk.
	// KV().commit() updates the consistent index in backend.
	// All operations that update the consistent index must be called sequentially
	// from the applyAll function.
	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
	// the goroutine created below.
	s.KV().Commit()

	s.wg.Add(1)
	go func() {
		defer s.wg.Done()

	s.goAttach(func() {
		d, err := clone.SaveNoCopy()
		// TODO: current store will never fail to do a snapshot
		// what should we do if the store might fail?
@@ -1176,8 +1420,6 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
			}
			plog.Panicf("unexpected create snapshot error %v", err)
		}
		// commit kv to write metadata (for example: consistent index) to disk.
		s.KV().Commit()
		// SaveSnap saves the snapshot and releases the locked wal files
		// to the snapshot index.
		if err = s.r.storage.SaveSnap(snap); err != nil {
@@ -1210,7 +1452,23 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
			plog.Panicf("unexpected compaction error %v", err)
		}
		plog.Infof("compacted raft log at %d", compacti)
	}()
	})
}

// CutPeer drops messages to the specified peer.
func (s *EtcdServer) CutPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.CutPeer(id)
	}
}

// MendPeer recovers the message dropping behavior of the given peer.
func (s *EtcdServer) MendPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.MendPeer(id)
	}
}

func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
@@ -1233,7 +1491,7 @@ func (s *EtcdServer) monitorVersions() {
		select {
		case <-s.forceVersionC:
		case <-time.After(monitorVersionInterval):
		case <-s.done:
		case <-s.stopping:
			return
		}

@@ -1254,18 +1512,18 @@ func (s *EtcdServer) monitorVersions() {
		// 1. use the decided version if possible
		// 2. or use the min cluster version
		if s.cluster.Version() == nil {
			verStr := version.MinClusterVersion
			if v != nil {
				go s.updateClusterVersion(v.String())
			} else {
				go s.updateClusterVersion(version.MinClusterVersion)
				verStr = v.String()
			}
			s.goAttach(func() { s.updateClusterVersion(verStr) })
			continue
		}

		// update cluster version only if the decided version is greater than
		// the current cluster version
		if v != nil && s.cluster.Version().LessThan(*v) {
			go s.updateClusterVersion(v.String())
			s.goAttach(func() { s.updateClusterVersion(v.String()) })
		}
	}
}
@@ -1355,3 +1613,31 @@ func (s *EtcdServer) getAppliedIndex() uint64 {
func (s *EtcdServer) setAppliedIndex(v uint64) {
	atomic.StoreUint64(&s.appliedIndex, v)
}

func (s *EtcdServer) getCommittedIndex() uint64 {
	return atomic.LoadUint64(&s.committedIndex)
}

func (s *EtcdServer) setCommittedIndex(v uint64) {
	atomic.StoreUint64(&s.committedIndex, v)
}

// goAttach creates a goroutine on a given function and tracks it using
// the etcdserver waitgroup.
func (s *EtcdServer) goAttach(f func()) {
	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
	defer s.wgMu.RUnlock()
	select {
	case <-s.stopping:
		plog.Warning("server has stopped (skipping goAttach)")
		return
	default:
	}

	// now safe to add since waitgroup wait has not started yet
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}
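goAttach is the recurring refactor in this file: every bare go func() that outlives a request is replaced by a tracked goroutine so shutdown can wait for it. A stripped-down sketch of the same tracker and its use (tracker here is a hypothetical reduction of EtcdServer):

package main

import (
	"fmt"
	"sync"
	"time"
)

// tracker is a reduced EtcdServer: a stopping gate plus a waitgroup.
type tracker struct {
	mu       sync.RWMutex // guards wg.Add against a concurrent close(stopping)
	wg       sync.WaitGroup
	stopping chan struct{}
}

func (t *tracker) goAttach(f func()) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	select {
	case <-t.stopping:
		return // refuse new work once shutdown has begun
	default:
	}
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		f()
	}()
}

func (t *tracker) stop() {
	t.mu.Lock()
	close(t.stopping)
	t.mu.Unlock()
	t.wg.Wait() // all attached goroutines have exited here
}

func main() {
	t := &tracker{stopping: make(chan struct{})}
	t.goAttach(func() { time.Sleep(50 * time.Millisecond); fmt.Println("background work done") })
	t.stop()
	t.goAttach(func() { fmt.Println("never runs") }) // skipped: already stopping
}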

8
vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
generated
vendored
@@ -16,7 +16,6 @@ package etcdserver

import (
	"io"
	"log"

	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/raft/raftpb"
@@ -26,12 +25,7 @@ import (
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
// as ReadCloser.
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapi uint64, confState raftpb.ConfState) snap.Message {
	snapt, err := s.r.raftStorage.Term(snapi)
	if err != nil {
		log.Panicf("get term should never fail: %v", err)
	}

func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
	// get a snapshot of v2 store as []byte
	clone := s.store.Clone()
	d, err := clone.SaveNoCopy()

13
vendor/github.com/coreos/etcd/etcdserver/stats/server.go
generated
vendored
@@ -123,17 +123,11 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
	ss.Lock()
	defer ss.Unlock()

	now := time.Now()

	if ss.State != raft.StateLeader {
		ss.State = raft.StateLeader
		ss.LeaderInfo.Name = ss.ID
		ss.LeaderInfo.StartTime = now
	}
	ss.becomeLeader()

	ss.sendRateQueue.Insert(
		&RequestStats{
			SendingTime: now,
			SendingTime: time.Now(),
			Size:        reqSize,
		},
	)
@@ -144,7 +138,10 @@ func (ss *ServerStats) SendAppendReq(reqSize int) {
func (ss *ServerStats) BecomeLeader() {
	ss.Lock()
	defer ss.Unlock()
	ss.becomeLeader()
}

func (ss *ServerStats) becomeLeader() {
	if ss.State != raft.StateLeader {
		ss.State = raft.StateLeader
		ss.LeaderInfo.Name = ss.ID

2
vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
generated
vendored
@@ -18,7 +18,7 @@ package stats
import "github.com/coreos/pkg/capnslog"

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "stats")
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
)

type Stats interface {

42
vendor/github.com/coreos/etcd/etcdserver/storage.go
generated
vendored
@@ -16,16 +16,12 @@ package etcdserver

import (
	"io"
	"os"
	"path"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/version"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
)
@@ -103,41 +99,3 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
	cid = types.ID(metadata.ClusterID)
	return
}

// upgradeDataDir converts an older version of the etcdServer data to the newest version.
// It must ensure that, after upgrading, the most recent version is present.
func upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {
	switch ver {
	case version.DataDir2_0:
		err := makeMemberDir(baseDataDir)
		if err != nil {
			return err
		}
		fallthrough
	case version.DataDir2_0_1:
		fallthrough
	default:
	}
	return nil
}

func makeMemberDir(dir string) error {
	membdir := path.Join(dir, "member")
	_, err := os.Stat(membdir)
	switch {
	case err == nil:
		return nil
	case !os.IsNotExist(err):
		return err
	}
	if err := fileutil.CreateDirAll(membdir); err != nil {
		return err
	}
	names := []string{"snap", "wal"}
	for _, name := range names {
		if err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {
			return err
		}
	}
	return nil
}

69
vendor/github.com/coreos/etcd/etcdserver/util.go
generated
vendored
@@ -25,13 +25,7 @@ import (
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
	var connectedNum int
	for _, m := range members {
		if m.ID == self || isConnectedSince(transport, since, m.ID) {
			connectedNum++
		}
	}
	return connectedNum >= (len(members)+1)/2
	return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
}

// isConnectedSince checks whether the local member is connected to the
@@ -40,3 +34,64 @@ func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote ty
	t := transport.ActiveSince(remote)
	return !t.IsZero() && t.Before(since)
}

// isConnectedFullySince checks whether the local member is connected to all
// members in the cluster since the given time.
func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
	return numConnectedSince(transport, since, self, members) == len(members)
}

// numConnectedSince counts how many members are connected to the local member
// since the given time.
func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
	connectedNum := 0
	for _, m := range members {
		if m.ID == self || isConnectedSince(transport, since, m.ID) {
			connectedNum++
		}
	}
	return connectedNum
}

// longestConnected chooses the member with the longest active-since time.
// It returns false if nothing is active.
func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
	var longest types.ID
	var oldest time.Time
	for _, id := range membs {
		tm := tp.ActiveSince(id)
		if tm.IsZero() { // inactive
			continue
		}

		if oldest.IsZero() { // first longest candidate
			oldest = tm
			longest = id
		}

		if tm.Before(oldest) {
			oldest = tm
			longest = id
		}
	}
	if uint64(longest) == 0 {
		return longest, false
	}
	return longest, true
}

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier {
	return &notifier{
		c: make(chan struct{}, 0),
	}
}

func (nc *notifier) notify(err error) {
	nc.err = err
	close(nc.c)
}
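The notifier couples a one-shot broadcast (closing c) with a single error value, so many linearizable readers can block on one round and all observe the same result. A short sketch of how such a notifier is consumed (the reader loop is a hypothetical simplification of the read path):

package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier { return &notifier{c: make(chan struct{})} }

func (nc *notifier) notify(err error) {
	nc.err = err
	close(nc.c) // wakes every waiter at once
}

func main() {
	n := newNotifier()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		i := i
		go func() {
			defer wg.Done()
			<-n.c // block until the round completes
			fmt.Printf("reader %d proceeds, err=%v\n", i, n.err)
		}()
	}
	n.notify(nil) // one notification releases all readers
	wg.Wait()
}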

2
vendor/github.com/coreos/etcd/etcdserver/v2_server.go
generated
vendored
@@ -68,7 +68,7 @@ func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Res
		proposalsFailed.Inc()
		a.s.w.Trigger(r.ID, nil) // GC wait
		return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
	case <-a.s.done:
	case <-a.s.stopping:
	}
	return Response{}, ErrStopped
}

440
vendor/github.com/coreos/etcd/etcdserver/v3_server.go
generated
vendored
@@ -15,17 +15,20 @@
package etcdserver

import (
	"strconv"
	"strings"
	"bytes"
	"encoding/binary"
	"time"

	"github.com/coreos/etcd/auth"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/raft"

	"github.com/coreos/go-semver/semver"
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

const (
@@ -35,8 +38,15 @@ const (
	// specify a large value might end up with shooting in the foot.
	maxRequestBytes = 1.5 * 1024 * 1024

	// max timeout for waiting for a v3 request to go through raft.
	maxV3RequestTimeout = 5 * time.Second
	// In the healthy case, there might be a small gap (10s of entries) between
	// the applied index and committed index.
	// However, if the committed entries are very heavy to apply, the gap might grow.
	// We should stop accepting new proposals if the gap grows to a certain point.
	maxGapBetweenApplyAndCommitIndex = 5000
)

var (
	newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0"))
)

type RaftKV interface {
@@ -56,6 +66,9 @@ type Lessor interface {
	// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
	// is returned.
	LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)

	// LeaseTimeToLive retrieves lease information.
	LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
}

type Authenticator interface {
@@ -78,22 +91,44 @@ type Authenticator interface {
}

func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	var result *applyResult
	var err error
	// TODO: remove this checking when we release etcd 3.2
	if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
		return s.legacyRange(ctx, r)
	}

	if r.Serializable {
		var user string
		user, err = s.usernameFromCtx(ctx)
	if !r.Serializable {
		err := s.linearizableReadNotify(ctx)
		if err != nil {
			return nil, err
		}
		result = s.applyV3.Apply(
			&pb.InternalRaftRequest{
				Header: &pb.RequestHeader{Username: user},
				Range:  r})
	} else {
		result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
	}
	var resp *pb.RangeResponse
	var err error
	chk := func(ai *auth.AuthInfo) error {
		return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
	}
	get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
	if serr := s.doSerialize(ctx, chk, get); serr != nil {
		return nil, serr
	}
	return resp, err
}

// TODO: remove this func when we release etcd 3.2
func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	if r.Serializable {
		var resp *pb.RangeResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
		}
		get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
	if err != nil {
		return nil, err
	}
@@ -126,21 +161,54 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest)
}

func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	var result *applyResult
	var err error

	if isTxnSerializable(r) {
		user, err := s.usernameFromCtx(ctx)
		if err != nil {
			return nil, err
		}
		result = s.applyV3.Apply(
			&pb.InternalRaftRequest{
				Header: &pb.RequestHeader{Username: user},
				Txn:    r})
	} else {
		result, err = s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	// TODO: remove this checking when we release etcd 3.2
	if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
		return s.legacyTxn(ctx, r)
	}

	if isTxnReadonly(r) {
		if !isTxnSerializable(r) {
			err := s.linearizableReadNotify(ctx)
			if err != nil {
				return nil, err
			}
		}
		var resp *pb.TxnResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return checkTxnAuth(s.authStore, ai, r)
		}
		get := func() { resp, err = s.applyV3Base.Txn(r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.TxnResponse), nil
}

// TODO: remove this func when we release etcd 3.2
func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if isTxnSerializable(r) {
		var resp *pb.TxnResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return checkTxnAuth(s.authStore, ai, r)
		}
		get := func() { resp, err = s.applyV3Base.Txn(r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
@@ -164,8 +232,22 @@ func isTxnSerializable(r *pb.TxnRequest) bool {
	return true
}

func isTxnReadonly(r *pb.TxnRequest) bool {
	for _, u := range r.Success {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	for _, u := range r.Failure {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	return true
}
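isTxnReadonly scans both branches of a transaction and declares it read-only only when every op is a range; a single put or delete on either branch forces the raft path. A toy classification over hypothetical op kinds shows the same shape:

package main

import "fmt"

// op is an illustrative stand-in for etcd's RequestOp oneof.
type op struct{ kind string } // "range", "put", or "deleteRange"

// txnReadonly mirrors isTxnReadonly: every op on both the success and
// failure branch must be a range read.
func txnReadonly(success, failure []op) bool {
	for _, u := range success {
		if u.kind != "range" {
			return false
		}
	}
	for _, u := range failure {
		if u.kind != "range" {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(txnReadonly([]op{{"range"}}, []op{{"range"}})) // true
	fmt.Println(txnReadonly([]op{{"range"}, {"put"}}, nil))    // false
	fmt.Println(txnReadonly(nil, []op{{"deleteRange"}}))       // false
}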

func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Compaction: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
	if r.Physical && result != nil && result.physc != nil {
		<-result.physc
		// The compaction is done deleting keys; the hash is now settled
@@ -198,7 +280,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
		// only use positive int64 id's
		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	if err != nil {
		return nil, err
	}
@@ -209,7 +291,7 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
}

func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	if err != nil {
		return nil, err
	}
@@ -221,14 +303,13 @@ func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest)

func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
	ttl, err := s.lessor.Renew(id)
	if err == nil {
	if err == nil { // already requested to primary lessor (leader)
		return ttl, nil
	}
	if err != lease.ErrNotPrimary {
		return -1, err
	}

	// renewals don't go through raft; forward to leader manually
	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

@@ -239,7 +320,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
		return -1, lerr
	}
	for _, url := range leader.PeerURLs {
		lurl := url + "/leases"
		lurl := url + leasehttp.LeasePrefix
		ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
		if err == nil || err == lease.ErrLeaseNotFound {
			return ttl, err
@@ -249,6 +330,49 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e
	return -1, ErrTimeout
}

func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	if s.Leader() == s.ID() {
		// primary; timetolive directly from leader
		le := s.lessor.Lookup(lease.LeaseID(r.ID))
		if le == nil {
			return nil, lease.ErrLeaseNotFound
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
		if r.Keys {
			ks := le.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.Keys = kbs
		}
		return resp, nil
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		for _, url := range leader.PeerURLs {
			lurl := url + leasehttp.LeaseInternalPrefix
			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
			if err == nil {
				return resp.LeaseTimeToLiveResponse, nil
			}
			if err == lease.ErrLeaseNotFound {
				return nil, err
			}
		}
	}
	return nil, ErrTimeout
}

func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
	leader := s.cluster.Member(s.Leader())
	for leader == nil {
@@ -257,7 +381,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.done:
		case <-s.stopping:
			return nil, ErrStopped
		case <-ctx.Done():
			return nil, ErrNoLeader
@@ -270,7 +394,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
}

func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Alarm: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
	if err != nil {
		return nil, err
	}
@@ -281,7 +405,7 @@ func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmRe
}

func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r})
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
	if err != nil {
		return nil, err
	}
@@ -303,24 +427,47 @@ func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest)
}

func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
	st, err := s.AuthStore().GenSimpleToken()
	var result *applyResult

	err := s.linearizableReadNotify(ctx)
	if err != nil {
		return nil, err
	}

	internalReq := &pb.InternalAuthenticateRequest{
		Name:        r.Name,
		Password:    r.Password,
		SimpleToken: st,
	for {
		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
		if err != nil {
			plog.Errorf("invalid authentication request to user %s was issued", r.Name)
			return nil, err
		}

		st, err := s.AuthStore().GenSimpleToken()
		if err != nil {
			return nil, err
		}

		internalReq := &pb.InternalAuthenticateRequest{
			Name:        r.Name,
			Password:    r.Password,
			SimpleToken: st,
		}

		result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
		if err != nil {
			return nil, err
		}
		if result.err != nil {
			return nil, result.err
		}

		if checkedRevision != s.AuthStore().Revision() {
			plog.Infof("revision when password checked is obsolete, retrying")
			continue
		}

		break
	}

	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthenticateResponse), nil
}
|
||||
|
||||
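The rewritten Authenticate closes a time-of-check/time-of-use hole: it first waits on linearizableReadNotify so the local auth data is current, checks the password at a recorded auth-store revision, commits the token through raft, and retries the whole sequence if the auth store's revision moved in the meantime. A self-contained sketch of that optimistic pattern; the store interface is an illustrative stand-in, not the etcd auth API:

package sketch

// store is an illustrative stand-in for the pieces of the auth store
// that the retry loop needs.
type store interface {
    Revision() uint64 // bumped on every auth config change
    Check() error     // e.g. CheckPassword at the current revision
    Commit() error    // e.g. propose the Authenticate entry via raft
}

// authWithRetry re-runs check+commit until both happen against the same
// auth revision, so a concurrent role or password change cannot slip
// between the password check and the committed token.
func authWithRetry(s store) error {
    for {
        rev := s.Revision()
        if err := s.Check(); err != nil {
            return err // bad credentials: fail, don't retry
        }
        if err := s.Commit(); err != nil {
            return err
        }
        if rev == s.Revision() {
            return nil // auth config was stable across check and commit
        }
        // revision moved under us: re-check against the new config
    }
}
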
@@ -467,69 +614,52 @@ func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest
    return result.resp.(*pb.AuthRoleDeleteResponse), nil
}

func (s *EtcdServer) isValidSimpleToken(token string) bool {
    splitted := strings.Split(token, ".")
    if len(splitted) != 2 {
        return false
    }
    index, err := strconv.Atoi(splitted[1])
    if err != nil {
        return false
    }

    // CAUTION: below index synchronization is required because this node
    // might not receive and apply the log entry of Authenticate() RPC.
    authApplied := false
    for i := 0; i < 10; i++ {
        if uint64(index) <= s.getAppliedIndex() {
            authApplied = true
            break
// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
    for {
        ai, err := s.AuthStore().AuthInfoFromCtx(ctx)
        if err != nil {
            return err
        }

        time.Sleep(100 * time.Millisecond)
        if ai == nil {
            // chk expects non-nil AuthInfo; use empty credentials
            ai = &auth.AuthInfo{}
        }
        if err = chk(ai); err != nil {
            if err == auth.ErrAuthOldRevision {
                continue
            }
            return err
        }
        // fetch response for serialized request
        get()
        // empty credentials or current auth info means no need to retry
        if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
            return nil
        }
        // avoid TOCTOU error, retry of the request is required.
    }

    if !authApplied {
        plog.Errorf("timeout of waiting Authenticate() RPC")
        return false
    }

    return true
}

func (s *EtcdServer) usernameFromCtx(ctx context.Context) (string, error) {
    md, ok := metadata.FromContext(ctx)
    if !ok {
        return "", nil
func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
    ai := s.getAppliedIndex()
    ci := s.getCommittedIndex()
    if ci > ai+maxGapBetweenApplyAndCommitIndex {
        return nil, ErrTooManyRequests
    }

    ts, tok := md["token"]
    if !tok {
        return "", nil
    }

    token := ts[0]
    if !s.isValidSimpleToken(token) {
        return "", ErrInvalidAuthToken
    }

    username, uok := s.AuthStore().UsernameFromToken(token)
    if !uok {
        plog.Warningf("invalid auth token: %s", token)
        return "", ErrInvalidAuthToken
    }
    return username, nil
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
    r.Header = &pb.RequestHeader{
        ID: s.reqIDGen.Next(),
    }
    username, err := s.usernameFromCtx(ctx)

    authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
    if err != nil {
        return nil, err
    }
    r.Header.Username = username
    if authInfo != nil {
        r.Header.Username = authInfo.Username
        r.Header.AuthRevision = authInfo.Revision
    }

    data, err := r.Marshal()
    if err != nil {
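The new doSerialize helper above replaces the old token-polling dance: instead of sleeping until an Authenticate entry is applied, it checks permissions via chk, runs the local read via get, and retries if the auth store's revision changed in between. An illustrative call site; checkRangePerm and rangeLocal are hypothetical names, shown only to make the two closure roles concrete:

// Illustrative fragment, not actual etcd code.
var resp *pb.RangeResponse
var rerr error
serr := s.doSerialize(ctx,
    func(ai *auth.AuthInfo) error { return checkRangePerm(ai, r) }, // chk: permission gate
    func() { resp, rerr = rangeLocal(r) },                          // get: serialized local read
)
if serr != nil {
    return nil, serr // authentication/permission failure
}
return resp, rerr
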
@@ -546,7 +676,7 @@ func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.Intern
    }
    ch := s.w.Register(id)

    cctx, cancel := context.WithTimeout(ctx, maxV3RequestTimeout)
    cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
    defer cancel()

    start := time.Now()
@@ -566,5 +696,109 @@ func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.Intern
    }
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
    var result *applyResult
    var err error
    for {
        result, err = s.processInternalRaftRequestOnce(ctx, r)
        if err != auth.ErrAuthOldRevision {
            break
        }
    }

    return result, err
}

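Two guard rails arrive with the split into processInternalRaftRequestOnce and its retrying wrapper: the wrapper re-proposes only on auth.ErrAuthOldRevision, and Once refuses work outright when the committed index has run more than maxGapBetweenApplyAndCommitIndex ahead of the applied index. A self-contained sketch of that admission check; the gap constant here is illustrative, the real value is defined elsewhere in the server:

package sketch

import "errors"

var errTooManyRequests = errors.New("etcdserver: too many requests")

// maxGap is an illustrative bound; etcd defines its own constant.
const maxGap = 5000

// admitProposal sheds load when the applier lags the raft commit point:
// accepting more proposals would only grow the backlog of unapplied entries.
func admitProposal(appliedIndex, committedIndex uint64) error {
    if committedIndex > appliedIndex+maxGap {
        return errTooManyRequests
    }
    return nil
}
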
// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }

func (s *EtcdServer) linearizableReadLoop() {
    var rs raft.ReadState

    for {
        ctx := make([]byte, 8)
        binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next())

        select {
        case <-s.readwaitc:
        case <-s.stopping:
            return
        }

        nextnr := newNotifier()

        s.readMu.Lock()
        nr := s.readNotifier
        s.readNotifier = nextnr
        s.readMu.Unlock()

        cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
        if err := s.r.ReadIndex(cctx, ctx); err != nil {
            cancel()
            if err == raft.ErrStopped {
                return
            }
            plog.Errorf("failed to get read index from raft: %v", err)
            nr.notify(err)
            continue
        }
        cancel()

        var (
            timeout bool
            done    bool
        )
        for !timeout && !done {
            select {
            case rs = <-s.r.readStateC:
                done = bytes.Equal(rs.RequestCtx, ctx)
                if !done {
                    // a previous request might time out. now we should ignore the response of it and
                    // continue waiting for the response of the current requests.
                    plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
                }
            case <-time.After(s.Cfg.ReqTimeout()):
                plog.Warningf("timed out waiting for read index response")
                nr.notify(ErrTimeout)
                timeout = true
            case <-s.stopping:
                return
            }
        }
        if !done {
            continue
        }

        if ai := s.getAppliedIndex(); ai < rs.Index {
            select {
            case <-s.applyWait.Wait(rs.Index):
            case <-s.stopping:
                return
            }
        }
        // unblock all l-reads requested at indices before rs.Index
        nr.notify(nil)
    }
}

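The loop above batches all pending linearizable reads behind one raft ReadIndex round trip: it swaps in a fresh notifier, asks raft to confirm a read index with the leader's quorum, waits for the local applied index to catch up, and then releases every waiter at once. The notifier it relies on needs only a close-once channel plus a shared error; a sketch of that shape, though the real etcd type may differ in detail:

package sketch

// notifier releases a batch of waiting readers exactly once.
type notifier struct {
    c   chan struct{} // closed when the batch is resolved
    err error         // result every waiter in the batch observes
}

func newNotifier() *notifier {
    return &notifier{c: make(chan struct{})}
}

// notify records the outcome and unblocks everyone selecting on n.c.
// It must be called at most once, matching the loop's usage above.
func (n *notifier) notify(err error) {
    n.err = err
    close(n.c)
}
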
func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
    s.readMu.RLock()
    nc := s.readNotifier
    s.readMu.RUnlock()

    // signal linearizable loop for current notify if it hasn't been already
    select {
    case s.readwaitc <- struct{}{}:
    default:
    }

    // wait for read state notification
    select {
    case <-nc.c:
        return nc.err
    case <-ctx.Done():
        return ctx.Err()
    case <-s.done:
        return ErrStopped
    }
}

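Putting the two halves together: a linearizable read blocks on linearizableReadNotify, which returns only after the local applied index has reached a read index confirmed by the leader's quorum; once it returns, a plain local read is linearizable. An illustrative fragment of how a read path would use it, with readLocal as a hypothetical stand-in for the server's local mvcc read:

// Illustrative fragment, not actual etcd code.
if err := s.linearizableReadNotify(ctx); err != nil {
    return nil, err // no quorum-confirmed read index before the deadline
}
// Local state now reflects at least the confirmed read index, so every
// write linearized before this call is visible to the local read.
return readLocal(r)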