Version bump to etcd v3.2.11

vendor/github.com/coreos/etcd/etcdserver/BUILD (generated, vendored; 2 changes)

@@ -6,6 +6,7 @@ go_library(
        "apply.go",
        "apply_auth.go",
        "apply_v2.go",
        "backend.go",
        "cluster_util.go",
        "config.go",
        "consistent_index.go",
@@ -40,7 +41,6 @@ go_library(
        "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/contention:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library",

vendor/github.com/coreos/etcd/etcdserver/api/BUILD (generated, vendored; 4 changes)

@@ -29,7 +29,11 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
+        "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:all-srcs",
         "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:all-srcs",
+        "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:all-srcs",
+        "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:all-srcs",
+        "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:all-srcs",
         "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:all-srcs",
     ],
     tags = ["automanaged"],

vendor/github.com/coreos/etcd/etcdserver/api/capability.go (generated, vendored; 8 changes)

@@ -33,11 +33,10 @@ var (
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")

    // capabilityMaps is a static map of version to capability map.
    // the base capabilities is the set of capability 2.0 supports.
    capabilityMaps = map[string]map[Capability]bool{
        "2.3.0": {AuthCapability: true},
        "3.0.0": {AuthCapability: true, V3rpcCapability: true},
        "3.1.0": {AuthCapability: true, V3rpcCapability: true},
        "3.2.0": {AuthCapability: true, V3rpcCapability: true},
    }

    enableMapMu sync.RWMutex
@@ -48,7 +47,10 @@
 )

 func init() {
-    enabledMap = make(map[Capability]bool)
+    enabledMap = map[Capability]bool{
+        AuthCapability:  true,
+        V3rpcCapability: true,
+    }
 }

 // UpdateCapability updates the enabledMap when the cluster version increases.
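
Aside (not part of the diff): a minimal sketch of how the capability gates above are typically consulted, assuming the exported helpers that ship in this etcd release (Capability, AuthCapability, IsCapabilityEnabled):

package main

import (
    "fmt"

    "github.com/coreos/etcd/etcdserver/api"
)

func main() {
    // Per the new init() above, auth and v3rpc start out enabled;
    // UpdateCapability later swaps enabledMap for one of the
    // capabilityMaps entries once the cluster version is decided.
    if api.IsCapabilityEnabled(api.AuthCapability) {
        fmt.Println("auth endpoints may be served")
    }
}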

vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD (generated, vendored; new file, 40 lines)

@@ -0,0 +1,40 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "base.go",
        "peer.go",
    ],
    importpath = "github.com/coreos/etcd/etcdserver/api/etcdhttp",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/error:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
        "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library",
        "//vendor/github.com/coreos/etcd/raft:go_default_library",
        "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
        "//vendor/github.com/coreos/etcd/version:go_default_library",
        "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go (generated, vendored; new file, 186 lines)

@@ -0,0 +1,186 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdhttp

import (
    "encoding/json"
    "expvar"
    "fmt"
    "net/http"
    "strings"
    "time"

    etcdErr "github.com/coreos/etcd/error"
    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api"
    "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
    "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/pkg/logutil"
    "github.com/coreos/etcd/raft"
    "github.com/coreos/etcd/version"
    "github.com/coreos/pkg/capnslog"
    "github.com/prometheus/client_golang/prometheus"
    "golang.org/x/net/context"
)

var (
    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
    mlog = logutil.NewMergeLogger(plog)
)

const (
    configPath  = "/config"
    metricsPath = "/metrics"
    healthPath  = "/health"
    varsPath    = "/debug/vars"
    versionPath = "/version"
)

// HandleBasic adds handlers to a mux for serving JSON etcd client requests
// that do not access the v2 store.
func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) {
    mux.HandleFunc(varsPath, serveVars)
    mux.HandleFunc(configPath+"/local/log", logHandleFunc)
    mux.Handle(metricsPath, prometheus.Handler())
    mux.Handle(healthPath, healthHandler(server))
    mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
}

func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if !allowMethod(w, r, "GET") {
            return
        }
        if uint64(server.Leader()) == raft.None {
            http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
            return
        }
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
            http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
            return
        }
        w.WriteHeader(http.StatusOK)
        w.Write([]byte(`{"health": "true"}`))
    }
}

func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        v := c.Version()
        if v != nil {
            fn(w, r, v.String())
        } else {
            fn(w, r, "not_decided")
        }
    }
}

func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
    if !allowMethod(w, r, "GET") {
        return
    }
    vs := version.Versions{
        Server:  version.Version,
        Cluster: clusterV,
    }

    w.Header().Set("Content-Type", "application/json")
    b, err := json.Marshal(&vs)
    if err != nil {
        plog.Panicf("cannot marshal versions to json (%v)", err)
    }
    w.Write(b)
}

func logHandleFunc(w http.ResponseWriter, r *http.Request) {
    if !allowMethod(w, r, "PUT") {
        return
    }

    in := struct{ Level string }{}

    d := json.NewDecoder(r.Body)
    if err := d.Decode(&in); err != nil {
        WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
        return
    }

    logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
    if err != nil {
        WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
        return
    }

    plog.Noticef("globalLogLevel set to %q", logl.String())
    capnslog.SetGlobalLogLevel(logl)
    w.WriteHeader(http.StatusNoContent)
}

func serveVars(w http.ResponseWriter, r *http.Request) {
    if !allowMethod(w, r, "GET") {
        return
    }

    w.Header().Set("Content-Type", "application/json; charset=utf-8")
    fmt.Fprintf(w, "{\n")
    first := true
    expvar.Do(func(kv expvar.KeyValue) {
        if !first {
            fmt.Fprintf(w, ",\n")
        }
        first = false
        fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
    })
    fmt.Fprintf(w, "\n}\n")
}

func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
    if m == r.Method {
        return true
    }
    w.Header().Set("Allow", m)
    http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
    return false
}

// WriteError logs and writes the given Error to the ResponseWriter
// If Error is an etcdErr, it is rendered to the ResponseWriter
// Otherwise, it is assumed to be a StatusInternalServerError
func WriteError(w http.ResponseWriter, r *http.Request, err error) {
    if err == nil {
        return
    }
    switch e := err.(type) {
    case *etcdErr.Error:
        e.WriteTo(w)
    case *httptypes.HTTPError:
        if et := e.WriteTo(w); et != nil {
            plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
        }
    default:
        switch err {
        case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
            mlog.MergeError(err)
        default:
            mlog.MergeErrorf("got unexpected response error (%v)", err)
        }
        herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
        if et := herr.WriteTo(w); et != nil {
            plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
        }
    }
}
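
The new HandleBasic hook takes any *http.ServeMux, so the /health, /version, /metrics, /config/local/log, and /debug/vars endpoints can be mounted without the rest of the v2 API. A hedged usage sketch (srv, an already-started *etcdserver.EtcdServer, and the listen address are assumptions):

package main

import (
    "log"
    "net/http"

    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api/etcdhttp"
)

// serveBasic exposes only the handlers registered by HandleBasic above.
func serveBasic(srv *etcdserver.EtcdServer) {
    mux := http.NewServeMux()
    etcdhttp.HandleBasic(mux, srv) // wires /health, /version, /metrics, ...
    log.Fatal(http.ListenAndServe("127.0.0.1:2379", mux))
}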

vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go (generated, vendored; moved from etcdserver/api/v2http)

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package v2http
+package etcdhttp

 import (
     "encoding/json"
@@ -61,7 +61,7 @@ type peerMembersHandler struct {
 }

 func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-    if !allowMethod(w, r.Method, "GET") {
+    if !allowMethod(w, r, "GET") {
         return
     }
     w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD (generated, vendored; 6 changes)

@@ -9,7 +9,6 @@ go_library(
         "doc.go",
         "http.go",
         "metrics.go",
-        "peer.go",
     ],
     importpath = "github.com/coreos/etcd/etcdserver/api/v2http",
     visibility = ["//visibility:public"],
@@ -17,18 +16,15 @@ go_library(
         "//vendor/github.com/coreos/etcd/error:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library",
+        "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library",
         "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library",
-        "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library",
         "//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
-        "//vendor/github.com/coreos/etcd/raft:go_default_library",
-        "//vendor/github.com/coreos/etcd/rafthttp:go_default_library",
         "//vendor/github.com/coreos/etcd/store:go_default_library",
-        "//vendor/github.com/coreos/etcd/version:go_default_library",
         "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
         "//vendor/github.com/jonboulle/clockwork:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",

vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go (generated, vendored; 146 changes)

@@ -17,7 +17,6 @@ package v2http
 import (
     "encoding/json"
     "errors"
-    "expvar"
     "fmt"
     "io/ioutil"
     "net/http"
@@ -30,38 +29,36 @@ import (
     etcdErr "github.com/coreos/etcd/error"
     "github.com/coreos/etcd/etcdserver"
     "github.com/coreos/etcd/etcdserver/api"
+    "github.com/coreos/etcd/etcdserver/api/etcdhttp"
     "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
     "github.com/coreos/etcd/etcdserver/auth"
     "github.com/coreos/etcd/etcdserver/etcdserverpb"
     "github.com/coreos/etcd/etcdserver/membership"
     "github.com/coreos/etcd/etcdserver/stats"
     "github.com/coreos/etcd/pkg/types"
-    "github.com/coreos/etcd/raft"
     "github.com/coreos/etcd/store"
-    "github.com/coreos/etcd/version"
-    "github.com/coreos/pkg/capnslog"
     "github.com/jonboulle/clockwork"
-    "github.com/prometheus/client_golang/prometheus"
     "golang.org/x/net/context"
 )

 const (
-    authPrefix               = "/v2/auth"
-    keysPrefix               = "/v2/keys"
-    deprecatedMachinesPrefix = "/v2/machines"
-    membersPrefix            = "/v2/members"
-    statsPrefix              = "/v2/stats"
-    varsPath                 = "/debug/vars"
-    metricsPath              = "/metrics"
-    healthPath               = "/health"
-    versionPath              = "/version"
-    configPath               = "/config"
+    authPrefix     = "/v2/auth"
+    keysPrefix     = "/v2/keys"
+    machinesPrefix = "/v2/machines"
+    membersPrefix  = "/v2/members"
+    statsPrefix    = "/v2/stats"
 )

 // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
 func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
-    sec := auth.NewStore(server, timeout)
+    mux := http.NewServeMux()
+    etcdhttp.HandleBasic(mux, server)
+    handleV2(mux, server, timeout)
+    return requestLogger(mux)
+}
+
+func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) {
+    sec := auth.NewStore(server, timeout)
     kh := &keysHandler{
         sec:    sec,
         server: server,
@@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
         clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
     }

-    dmh := &deprecatedMachinesHandler{
-        cluster: server.Cluster(),
-    }
+    mah := &machinesHandler{cluster: server.Cluster()}

     sech := &authHandler{
         sec:                   sec,
         cluster:               server.Cluster(),
         clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
     }

-    mux := http.NewServeMux()
     mux.HandleFunc("/", http.NotFound)
-    mux.Handle(healthPath, healthHandler(server))
-    mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
     mux.Handle(keysPrefix, kh)
     mux.Handle(keysPrefix+"/", kh)
     mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
     mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
     mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
-    mux.HandleFunc(varsPath, serveVars)
-    mux.HandleFunc(configPath+"/local/log", logHandleFunc)
-    mux.Handle(metricsPath, prometheus.Handler())
     mux.Handle(membersPrefix, mh)
     mux.Handle(membersPrefix+"/", mh)
-    mux.Handle(deprecatedMachinesPrefix, dmh)
+    mux.Handle(machinesPrefix, mah)
     handleAuth(mux, sech)
-
-    return requestLogger(mux)
 }
@@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     }
 }

-type deprecatedMachinesHandler struct {
+type machinesHandler struct {
     cluster api.Cluster
 }

-func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     if !allowMethod(w, r.Method, "GET", "HEAD") {
         return
     }
@@ -234,7 +220,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     }
     now := h.clock.Now()
     m := membership.NewMember("", req.PeerURLs, "", &now)
-    err := h.server.AddMember(ctx, *m)
+    _, err := h.server.AddMember(ctx, *m)
     switch {
     case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
         writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
@@ -255,7 +241,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     if !ok {
         return
     }
-    err := h.server.RemoveMember(ctx, uint64(id))
+    _, err := h.server.RemoveMember(ctx, uint64(id))
     switch {
     case err == membership.ErrIDRemoved:
         writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
@@ -280,7 +266,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         ID:             id,
         RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
     }
-    err := h.server.UpdateMember(ctx, m)
+    _, err := h.server.UpdateMember(ctx, m)
     switch {
     case err == membership.ErrPeerURLexists:
         writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
@@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
     }
     stats := h.stats.LeaderStats()
     if stats == nil {
-        writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
+        etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
         return
     }
     w.Header().Set("Content-Type", "application/json")
     w.Write(stats)
 }

-func serveVars(w http.ResponseWriter, r *http.Request) {
-    if !allowMethod(w, r.Method, "GET") {
-        return
-    }
-
-    w.Header().Set("Content-Type", "application/json; charset=utf-8")
-    fmt.Fprintf(w, "{\n")
-    first := true
-    expvar.Do(func(kv expvar.KeyValue) {
-        if !first {
-            fmt.Fprintf(w, ",\n")
-        }
-        first = false
-        fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
-    })
-    fmt.Fprintf(w, "\n}\n")
-}
-
-func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) {
-        if !allowMethod(w, r.Method, "GET") {
-            return
-        }
-        if uint64(server.Leader()) == raft.None {
-            http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
-            return
-        }
-        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-        defer cancel()
-        if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
-            http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
-            return
-        }
-        w.WriteHeader(http.StatusOK)
-        w.Write([]byte(`{"health": "true"}`))
-    }
-}
-
-func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) {
-        v := c.Version()
-        if v != nil {
-            fn(w, r, v.String())
-        } else {
-            fn(w, r, "not_decided")
-        }
-    }
-}
-
-func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
-    if !allowMethod(w, r.Method, "GET") {
-        return
-    }
-    vs := version.Versions{
-        Server:  version.Version,
-        Cluster: clusterV,
-    }
-
-    w.Header().Set("Content-Type", "application/json")
-    b, err := json.Marshal(&vs)
-    if err != nil {
-        plog.Panicf("cannot marshal versions to json (%v)", err)
-    }
-    w.Write(b)
-}
-
-func logHandleFunc(w http.ResponseWriter, r *http.Request) {
-    if !allowMethod(w, r.Method, "PUT") {
-        return
-    }
-
-    in := struct{ Level string }{}
-
-    d := json.NewDecoder(r.Body)
-    if err := d.Decode(&in); err != nil {
-        writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
-        return
-    }
-
-    logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
-    if err != nil {
-        writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
-        return
-    }
-
-    plog.Noticef("globalLogLevel set to %q", logl.String())
-    capnslog.SetGlobalLogLevel(logl)
-    w.WriteHeader(http.StatusNoContent)
-}
-
 // parseKeyRequest converts a received http.Request on keysPrefix to
 // a server Request, performing validation of supplied fields as appropriate.
 // If any validation fails, an empty Request and non-nil error is returned.
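
Call sites are unchanged by this refactor: NewClientHandler still returns the complete muxed handler, now with the basic endpoints delegated to etcdhttp. A hedged sketch of a caller (srv and the use of Cfg.ReqTimeout() as the timeout are assumptions):

package main

import (
    "log"
    "net/http"

    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api/v2http"
)

// serveV2 is a sketch: srv is an assumed, already-started server.
func serveV2(srv *etcdserver.EtcdServer) {
    // handleV2 plus etcdhttp.HandleBasic, behind the request logger.
    h := v2http.NewClientHandler(srv, srv.Cfg.ReqTimeout())
    log.Fatal(http.ListenAndServe("127.0.0.1:2379", h))
}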

vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go (generated, vendored; 30 changes)

@@ -20,12 +20,11 @@ import (
     "strings"
     "time"

-    etcdErr "github.com/coreos/etcd/error"
     "github.com/coreos/etcd/etcdserver"
+    "github.com/coreos/etcd/etcdserver/api/etcdhttp"
     "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"

     "github.com/coreos/etcd/etcdserver/auth"
     "github.com/coreos/etcd/pkg/logutil"

     "github.com/coreos/pkg/capnslog"
 )
@@ -39,37 +38,18 @@ var (
     mlog = logutil.NewMergeLogger(plog)
 )

 // writeError logs and writes the given Error to the ResponseWriter
 // If Error is an etcdErr, it is rendered to the ResponseWriter
 // Otherwise, it is assumed to be a StatusInternalServerError
 func writeError(w http.ResponseWriter, r *http.Request, err error) {
     if err == nil {
         return
     }
-    switch e := err.(type) {
-    case *etcdErr.Error:
-        e.WriteTo(w)
-    case *httptypes.HTTPError:
-        if et := e.WriteTo(w); et != nil {
-            plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
-        }
-    case auth.Error:
+    if e, ok := err.(auth.Error); ok {
         herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
         if et := herr.WriteTo(w); et != nil {
             plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
         }
-    default:
-        switch err {
-        case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
-            mlog.MergeError(err)
-        default:
-            mlog.MergeErrorf("got unexpected response error (%v)", err)
-        }
-        herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
-        if et := herr.WriteTo(w); et != nil {
-            plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
-        }
+        return
     }
+    etcdhttp.WriteError(w, r, err)
 }

 // allowMethod verifies that the given method is one of the allowed methods,

vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD (generated, vendored; new file, 32 lines)

@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "v3client.go",
    ],
    importpath = "github.com/coreos/etcd/etcdserver/api/v3client",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
        "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go (generated, vendored; new file, 45 lines)

@@ -0,0 +1,45 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package v3client provides clientv3 interfaces from an etcdserver.
//
// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
//
//	import (
//		"context"
//
//		"github.com/coreos/etcd/embed"
//		"github.com/coreos/etcd/etcdserver/api/v3client"
//	)
//
//	...
//
//	// create an embedded EtcdServer from the default configuration
//	cfg := embed.NewConfig()
//	cfg.Dir = "default.etcd"
//	e, err := embed.StartEtcd(cfg)
//	if err != nil {
//		// handle error!
//	}
//
//	// wrap the EtcdServer with v3client
//	cli := v3client.New(e.Server)
//
//	// use like an ordinary clientv3
//	resp, err := cli.Put(context.TODO(), "some-key", "it works!")
//	if err != nil {
//		// handle error!
//	}
//
package v3client

vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go (generated, vendored; new file, 67 lines)

@@ -0,0 +1,67 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3client

import (
    "time"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api/v3rpc"
    "github.com/coreos/etcd/proxy/grpcproxy/adapter"

    "golang.org/x/net/context"
)

// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
// of making gRPC calls through sockets, the client makes direct function calls
// to the etcd server through its api/v3rpc function interfaces.
func New(s *etcdserver.EtcdServer) *clientv3.Client {
    c := clientv3.NewCtxClient(context.Background())

    kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
    c.KV = clientv3.NewKVFromKVClient(kvc)

    lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
    c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second)

    wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
    c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)}

    mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
    c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc)

    clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
    c.Cluster = clientv3.NewClusterFromClusterClient(clc)

    // TODO: implement clientv3.Auth interface?

    return c
}

// blankContext implements Stringer on a context so the ctx string doesn't
// depend on the context's WithValue data, which tends to be unsynchronized
// (e.g., x/net/trace), causing ctx.String() to throw data races.
type blankContext struct{ context.Context }

func (*blankContext) String() string { return "(blankCtx)" }

// watchWrapper wraps clientv3 watch calls to blank out the context
// to avoid races on trace data.
type watchWrapper struct{ clientv3.Watcher }

func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
    return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
}

vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD (generated, vendored; new file, 34 lines)

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "election.go",
    ],
    importpath = "github.com/coreos/etcd/etcdserver/api/v3election",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go (generated, vendored; new file, 16 lines)

@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package v3election provides a v3 election service from an etcdserver.
package v3election

vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go (generated, vendored; new file, 123 lines)

@@ -0,0 +1,123 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3election

import (
    "golang.org/x/net/context"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/clientv3/concurrency"
    epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
)

type electionServer struct {
    c *clientv3.Client
}

func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
    return &electionServer{c}
}

func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
    s, err := es.session(ctx, req.Lease)
    if err != nil {
        return nil, err
    }
    e := concurrency.NewElection(s, string(req.Name))
    if err = e.Campaign(ctx, string(req.Value)); err != nil {
        return nil, err
    }
    return &epb.CampaignResponse{
        Header: e.Header(),
        Leader: &epb.LeaderKey{
            Name:  req.Name,
            Key:   []byte(e.Key()),
            Rev:   e.Rev(),
            Lease: int64(s.Lease()),
        },
    }, nil
}

func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
    s, err := es.session(ctx, req.Leader.Lease)
    if err != nil {
        return nil, err
    }
    e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
    if err := e.Proclaim(ctx, string(req.Value)); err != nil {
        return nil, err
    }
    return &epb.ProclaimResponse{Header: e.Header()}, nil
}

func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
    s, err := es.session(stream.Context(), -1)
    if err != nil {
        return err
    }
    e := concurrency.NewElection(s, string(req.Name))
    ch := e.Observe(stream.Context())
    for stream.Context().Err() == nil {
        select {
        case <-stream.Context().Done():
        case resp, ok := <-ch:
            if !ok {
                return nil
            }
            lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
            if err := stream.Send(lresp); err != nil {
                return err
            }
        }
    }
    return stream.Context().Err()
}

func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
    s, err := es.session(ctx, -1)
    if err != nil {
        return nil, err
    }
    l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
    if lerr != nil {
        return nil, lerr
    }
    return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
}

func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
    s, err := es.session(ctx, req.Leader.Lease)
    if err != nil {
        return nil, err
    }
    e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
    if err := e.Resign(ctx); err != nil {
        return nil, err
    }
    return &epb.ResignResponse{Header: e.Header()}, nil
}

func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
    s, err := concurrency.NewSession(
        es.c,
        concurrency.WithLease(clientv3.LeaseID(lease)),
        concurrency.WithContext(ctx),
    )
    if err != nil {
        return nil, err
    }
    s.Orphan()
    return s, nil
}
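
electionServer is a thin adapter over clientv3/concurrency, so its RPC semantics mirror the client-side API it delegates to. A hedged sketch of the equivalent direct usage (endpoint, election prefix, and value are placeholders):

package main

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/clientv3/concurrency"
    "golang.org/x/net/context"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    // One session (lease) per candidate, as in electionServer.session above.
    s, err := concurrency.NewSession(cli)
    if err != nil {
        panic(err)
    }
    defer s.Close()

    e := concurrency.NewElection(s, "/my-election/")
    // Campaign blocks until leadership is acquired, like the Campaign RPC.
    if err := e.Campaign(context.TODO(), "candidate-1"); err != nil {
        panic(err)
    }
    fmt.Println("elected with key", e.Key())
    _ = e.Resign(context.TODO())
}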

vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD (generated, vendored; new file, 39 lines)

@@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

filegroup(
    name = "go_default_library_protos",
    srcs = ["v3election.proto"],
    visibility = ["//visibility:public"],
)

go_library(
    name = "go_default_library",
    srcs = ["v3election.pb.go"],
    importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
        "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD (generated, vendored; new file, 33 lines)

@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["v3election.pb.gw.go"],
    importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/google.golang.org/grpc/codes:go_default_library",
        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
        "//vendor/google.golang.org/grpc/status:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go (generated, vendored; new file, 313 lines)

@@ -0,0 +1,313 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: etcdserver/api/v3election/v3electionpb/v3election.proto

/*
Package v3electionpb is a reverse proxy.

It translates gRPC into RESTful JSON APIs.
*/
package gw

import (
    "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
    "io"
    "net/http"

    "github.com/golang/protobuf/proto"
    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "github.com/grpc-ecosystem/grpc-gateway/utilities"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/status"
)

var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray

func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq v3electionpb.CampaignRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq v3electionpb.ProclaimRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq v3electionpb.LeaderRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
    var protoReq v3electionpb.LeaderRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    stream, err := client.Observe(ctx, &protoReq)
    if err != nil {
        return nil, metadata, err
    }
    header, err := stream.Header()
    if err != nil {
        return nil, metadata, err
    }
    metadata.HeaderMD = header
    return stream, metadata, nil

}

func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq v3electionpb.ResignRequest
    var metadata runtime.ServerMetadata

    if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
    conn, err := grpc.Dial(endpoint, opts...)
    if err != nil {
        return err
    }
    defer func() {
        if err != nil {
            if cerr := conn.Close(); cerr != nil {
                grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
            }
            return
        }
        go func() {
            <-ctx.Done()
            if cerr := conn.Close(); cerr != nil {
                grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
            }
        }()
    }()

    return RegisterElectionHandler(ctx, mux, conn)
}

// RegisterElectionHandler registers the http handlers for service Election to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
    return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
}

// RegisterElectionHandlerClient registers the http handlers for service Election to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ElectionClient" to call the correct interceptors.
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {

    mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        if cn, ok := w.(http.CloseNotifier); ok {
            go func(done <-chan struct{}, closed <-chan bool) {
                select {
                case <-done:
                case <-closed:
                    cancel()
                }
            }(ctx.Done(), cn.CloseNotify())
        }
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    return nil
}

var (
    pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, ""))

    pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, ""))

    pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, ""))

    pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, ""))

    pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, ""))
)

var (
    forward_Election_Campaign_0 = runtime.ForwardResponseMessage

    forward_Election_Proclaim_0 = runtime.ForwardResponseMessage

    forward_Election_Leader_0 = runtime.ForwardResponseMessage

    forward_Election_Observe_0 = runtime.ForwardResponseStream

    forward_Election_Resign_0 = runtime.ForwardResponseMessage
)
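
A hedged sketch of mounting this generated gateway as a standalone REST proxy; the etcd endpoint, listen address, and insecure dial option are assumptions:

package main

import (
    "log"
    "net/http"

    gw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    mux := runtime.NewServeMux()
    // RegisterElectionHandlerFromEndpoint (above) dials the gRPC endpoint
    // and closes the connection when ctx is done.
    opts := []grpc.DialOption{grpc.WithInsecure()}
    if err := gw.RegisterElectionHandlerFromEndpoint(ctx, mux, "127.0.0.1:2379", opts); err != nil {
        log.Fatal(err)
    }
    log.Fatal(http.ListenAndServe(":8080", mux))
}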

vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go (generated, vendored; new file, 2098 lines — diff suppressed because it is too large)
119
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
generated
vendored
Normal file
119
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
generated
vendored
Normal file
@@ -0,0 +1,119 @@
syntax = "proto3";
package v3electionpb;

import "gogoproto/gogo.proto";
import "etcd/etcdserver/etcdserverpb/rpc.proto";
import "etcd/mvcc/mvccpb/kv.proto";

// for grpc-gateway
import "google/api/annotations.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;

// The election service exposes client-side election facilities as a gRPC interface.
service Election {
// Campaign waits to acquire leadership in an election, returning a LeaderKey
// representing the leadership if successful. The LeaderKey can then be used
// to issue new values on the election, transactionally guard API requests on
// leadership still being held, and resign from the election.
rpc Campaign(CampaignRequest) returns (CampaignResponse) {
option (google.api.http) = {
post: "/v3alpha/election/campaign"
body: "*"
};
}
// Proclaim updates the leader's posted value with a new value.
rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
option (google.api.http) = {
post: "/v3alpha/election/proclaim"
body: "*"
};
}
// Leader returns the current election proclamation, if any.
rpc Leader(LeaderRequest) returns (LeaderResponse) {
option (google.api.http) = {
post: "/v3alpha/election/leader"
body: "*"
};
}
// Observe streams election proclamations in-order as made by the election's
// elected leaders.
rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
option (google.api.http) = {
post: "/v3alpha/election/observe"
body: "*"
};
}
// Resign releases election leadership so other campaigners may acquire
// leadership on the election.
rpc Resign(ResignRequest) returns (ResignResponse) {
option (google.api.http) = {
post: "/v3alpha/election/resign"
body: "*"
};
}
}

message CampaignRequest {
// name is the election's identifier for the campaign.
bytes name = 1;
// lease is the ID of the lease attached to leadership of the election. If the
// lease expires or is revoked before resigning leadership, then the
// leadership is transferred to the next campaigner, if any.
int64 lease = 2;
// value is the initial proclaimed value set when the campaigner wins the
// election.
bytes value = 3;
}

message CampaignResponse {
etcdserverpb.ResponseHeader header = 1;
// leader describes the resources used for holding leadership of the election.
LeaderKey leader = 2;
}

message LeaderKey {
// name is the election identifier that corresponds to the leadership key.
bytes name = 1;
// key is an opaque key representing the ownership of the election. If the key
// is deleted, then leadership is lost.
bytes key = 2;
// rev is the creation revision of the key. It can be used to test for ownership
// of an election during transactions by testing the key's creation revision
// matches rev.
int64 rev = 3;
// lease is the lease ID of the election leader.
int64 lease = 4;
}

message LeaderRequest {
// name is the election identifier for the leadership information.
bytes name = 1;
}

message LeaderResponse {
etcdserverpb.ResponseHeader header = 1;
// kv is the key-value pair representing the latest leader update.
mvccpb.KeyValue kv = 2;
}

message ResignRequest {
// leader is the leadership to relinquish by resignation.
LeaderKey leader = 1;
}

message ResignResponse {
etcdserverpb.ResponseHeader header = 1;
}

message ProclaimRequest {
// leader is the leadership hold on the election.
LeaderKey leader = 1;
// value is an update meant to overwrite the leader's current value.
bytes value = 2;
}

message ProclaimResponse {
etcdserverpb.ResponseHeader header = 1;
}
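The Election RPCs above map directly onto the clientv3/concurrency helpers. Below is a minimal sketch of driving a campaign from Go; it is not part of this vendored diff, and the endpoint address, election prefix, and values are placeholders.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session lease backs the leadership key; losing the lease loses leadership.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election")
	ctx := context.Background()
	if err := e.Campaign(ctx, "v1"); err != nil { // blocks until leadership is acquired
		log.Fatal(err)
	}
	if err := e.Proclaim(ctx, "v2"); err != nil { // corresponds to the Proclaim RPC
		log.Fatal(err)
	}
	if err := e.Resign(ctx); err != nil { // hands leadership to the next campaigner
		log.Fatal(err)
	}
}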
34
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD
generated
vendored
Normal file
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"doc.go",
"lock.go",
],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
16
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package v3lock provides a v3 locking service from an etcdserver.
package v3lock
56
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3lock

import (
"golang.org/x/net/context"

"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
)

type lockServer struct {
c *clientv3.Client
}

func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
return &lockServer{c}
}

func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
s, err := concurrency.NewSession(
ls.c,
concurrency.WithLease(clientv3.LeaseID(req.Lease)),
concurrency.WithContext(ctx),
)
if err != nil {
return nil, err
}
s.Orphan()
m := concurrency.NewMutex(s, string(req.Name))
if err = m.Lock(ctx); err != nil {
return nil, err
}
return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
}

func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
resp, err := ls.c.Delete(ctx, string(req.Key))
if err != nil {
return nil, err
}
return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
}
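Two details are worth noting: the server builds the session from the lease carried in the request, and s.Orphan() stops the server's keepalive loop, so the lock's lifetime is governed by the caller's lease rather than by this RPC. The same semantics are available client-side through concurrency.Mutex; a hedged fragment (the lock name is a placeholder and cli is assumed to be an existing *clientv3.Client):

s, err := concurrency.NewSession(cli)
if err != nil {
	log.Fatal(err)
}
defer s.Close()

m := concurrency.NewMutex(s, "/my-lock")
if err := m.Lock(context.TODO()); err != nil { // blocks until the lock is acquired
	log.Fatal(err)
}
defer m.Unlock(context.TODO()) // releases the ownership key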
38
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD
generated
vendored
Normal file
@@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

filegroup(
name = "go_default_library_protos",
srcs = ["v3lock.proto"],
visibility = ["//visibility:public"],
)

go_library(
name = "go_default_library",
srcs = ["v3lock.pb.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
33
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD
generated
vendored
Normal file
@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["v3lock.pb.gw.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
"//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
167
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto

/*
Package v3lockpb is a reverse proxy.

It translates gRPC into RESTful JSON APIs.
*/
package gw

import (
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
"io"
"net/http"

"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)

var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray

func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3lockpb.LockRequest
var metadata runtime.ServerMetadata

if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3lockpb.UnlockRequest
var metadata runtime.ServerMetadata

if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}

msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err

}

// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()

return RegisterLockHandler(ctx, mux, conn)
}

// RegisterLockHandler registers the http handlers for service Lock to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
}

// RegisterLockHandlerClient registers the http handlers for service Lock to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LockClient" to call the correct interceptors.
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {

mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if cn, ok := w.(http.CloseNotifier); ok {
go func(done <-chan struct{}, closed <-chan bool) {
select {
case <-done:
case <-closed:
cancel()
}
}(ctx.Done(), cn.CloseNotify())
}
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}

forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

})

return nil
}

var (
pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, ""))

pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, ""))
)

var (
forward_Lock_Lock_0 = runtime.ForwardResponseMessage

forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
)
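Wiring this reverse proxy into an HTTP server takes only a few lines. A sketch, not part of this diff; the listen address and etcd endpoint are placeholders, and the package is assumed imported as gw:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

mux := runtime.NewServeMux()
opts := []grpc.DialOption{grpc.WithInsecure()}
// Dials the etcd gRPC endpoint and mounts the JSON routes on mux.
if err := gw.RegisterLockHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
	log.Fatal(err)
}
log.Fatal(http.ListenAndServe(":8080", mux))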
978
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
generated
vendored
Normal file
@@ -0,0 +1,978 @@
// Code generated by protoc-gen-gogo.
// source: v3lock.proto
// DO NOT EDIT!

/*
Package v3lockpb is a generated protocol buffer package.

It is generated from these files:
v3lock.proto

It has these top-level messages:
LockRequest
LockResponse
UnlockRequest
UnlockResponse
*/
package v3lockpb

import (
"fmt"

proto "github.com/golang/protobuf/proto"

math "math"

etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"

_ "google.golang.org/genproto/googleapis/api/annotations"

context "golang.org/x/net/context"

grpc "google.golang.org/grpc"

io "io"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type LockRequest struct {
// name is the identifier for the distributed shared lock to be acquired.
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// lease is the ID of the lease that will be attached to ownership of the
// lock. If the lease expires or is revoked while holding the lock, the
// lock is automatically released. Calls to Lock with the same lease will
// be treated as a single acquisition; locking twice with the same lease is a
// no-op.
Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
}

func (m *LockRequest) Reset() { *m = LockRequest{} }
func (m *LockRequest) String() string { return proto.CompactTextString(m) }
func (*LockRequest) ProtoMessage() {}
func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} }

func (m *LockRequest) GetName() []byte {
if m != nil {
return m.Name
}
return nil
}

func (m *LockRequest) GetLease() int64 {
if m != nil {
return m.Lease
}
return 0
}

type LockResponse struct {
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
// key is a key that will exist on etcd for the duration that the Lock caller
// owns the lock. Users should not modify this key or the lock may exhibit
// undefined behavior.
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
}

func (m *LockResponse) Reset() { *m = LockResponse{} }
func (m *LockResponse) String() string { return proto.CompactTextString(m) }
func (*LockResponse) ProtoMessage() {}
func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} }

func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
if m != nil {
return m.Header
}
return nil
}

func (m *LockResponse) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}

type UnlockRequest struct {
// key is the lock ownership key granted by Lock.
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
}

func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
func (*UnlockRequest) ProtoMessage() {}
func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} }

func (m *UnlockRequest) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}

type UnlockResponse struct {
Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
}

func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
func (*UnlockResponse) ProtoMessage() {}
func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} }

func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
if m != nil {
return m.Header
}
return nil
}

func init() {
proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// Client API for Lock service

type LockClient interface {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
// Unlock takes a key returned by Lock and releases the hold on lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
}

type lockClient struct {
cc *grpc.ClientConn
}

func NewLockClient(cc *grpc.ClientConn) LockClient {
return &lockClient{cc}
}

func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
out := new(LockResponse)
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
out := new(UnlockResponse)
err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}

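The generated client can also be used directly over a raw gRPC connection, without the concurrency helpers. A sketch (the target address and lock name are placeholders):

conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
if err != nil {
	log.Fatal(err)
}
defer conn.Close()

lc := v3lockpb.NewLockClient(conn)
lresp, err := lc.Lock(context.Background(), &v3lockpb.LockRequest{Name: []byte("/my-lock")})
if err != nil {
	log.Fatal(err)
}
// The response key identifies this holder; hand it back to Unlock to release.
_, err = lc.Unlock(context.Background(), &v3lockpb.UnlockRequest{Key: lresp.Key})
if err != nil {
	log.Fatal(err)
}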
// Server API for Lock service

type LockServer interface {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
Lock(context.Context, *LockRequest) (*LockResponse, error)
// Unlock takes a key returned by Lock and releases the hold on lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
}

func RegisterLockServer(s *grpc.Server, srv LockServer) {
s.RegisterService(&_Lock_serviceDesc, srv)
}

func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LockRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LockServer).Lock(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v3lockpb.Lock/Lock",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LockServer).Lock(ctx, req.(*LockRequest))
}
return interceptor(ctx, in, info, handler)
}

func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnlockRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LockServer).Unlock(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v3lockpb.Lock/Unlock",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
}
return interceptor(ctx, in, info, handler)
}

var _Lock_serviceDesc = grpc.ServiceDesc{
ServiceName: "v3lockpb.Lock",
HandlerType: (*LockServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Lock",
Handler: _Lock_Lock_Handler,
},
{
MethodName: "Unlock",
Handler: _Lock_Unlock_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "v3lock.proto",
}

func (m *LockRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.Lease != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
}
return i, nil
}

func (m *LockResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
n1, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
return i, nil
}

func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
return i, nil
}

func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}

func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Header != nil {
dAtA[i] = 0xa
i++
i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
n2, err := m.Header.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}

func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
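encodeVarintV3Lock writes standard protobuf varints: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. A small worked example, not part of the generated file:

// 300 = 0b1_0010_1100. The low seven bits (0101100 = 0x2C) are emitted first
// with the continuation bit set (0xAC); the remaining bits (0b10) follow as 0x02.
buf := make([]byte, 2)
n := encodeVarintV3Lock(buf, 0, 300)
fmt.Printf("% x\n", buf[:n]) // ac 02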
func (m *LockRequest) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
if m.Lease != 0 {
n += 1 + sovV3Lock(uint64(m.Lease))
}
return n
}

func (m *LockResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovV3Lock(uint64(l))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}

func (m *UnlockRequest) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}

func (m *UnlockResponse) Size() (n int) {
var l int
_ = l
if m.Header != nil {
l = m.Header.Size()
n += 1 + l + sovV3Lock(uint64(l))
}
return n
}

func sovV3Lock(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozV3Lock(x uint64) (n int) {
return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *LockRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LockRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
}
m.Lease = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Lease |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *LockResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LockResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Header == nil {
m.Header = &etcdserverpb.ResponseHeader{}
}
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowV3Lock
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthV3Lock
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Header == nil {
m.Header = &etcdserverpb.ResponseHeader{}
}
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipV3Lock(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthV3Lock
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}

if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipV3Lock(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthV3Lock
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowV3Lock
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipV3Lock(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}

var (
ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) }

var fileDescriptorV3Lock = []byte{
// 336 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39,
0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2,
0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36,
0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31,
0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0,
0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00,
}
65
vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
generated
vendored
Normal file
@@ -0,0 +1,65 @@
syntax = "proto3";
package v3lockpb;

import "gogoproto/gogo.proto";
import "etcd/etcdserver/etcdserverpb/rpc.proto";

// for grpc-gateway
import "google/api/annotations.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;

// The lock service exposes client-side locking facilities as a gRPC interface.
service Lock {
// Lock acquires a distributed shared lock on a given named lock.
// On success, it will return a unique key that exists so long as the
// lock is held by the caller. This key can be used in conjunction with
// transactions to safely ensure updates to etcd only occur while holding
// lock ownership. The lock is held until Unlock is called on the key or the
// lease associated with the owner expires.
rpc Lock(LockRequest) returns (LockResponse) {
option (google.api.http) = {
post: "/v3alpha/lock/lock"
body: "*"
};
}

// Unlock takes a key returned by Lock and releases the hold on lock. The
// next Lock caller waiting for the lock will then be woken up and given
// ownership of the lock.
rpc Unlock(UnlockRequest) returns (UnlockResponse) {
option (google.api.http) = {
post: "/v3alpha/lock/unlock"
body: "*"
};
}
}

message LockRequest {
// name is the identifier for the distributed shared lock to be acquired.
bytes name = 1;
// lease is the ID of the lease that will be attached to ownership of the
// lock. If the lease expires or is revoked while holding the lock, the
// lock is automatically released. Calls to Lock with the same lease will
// be treated as a single acquisition; locking twice with the same lease is a
// no-op.
int64 lease = 2;
}

message LockResponse {
etcdserverpb.ResponseHeader header = 1;
// key is a key that will exist on etcd for the duration that the Lock caller
// owns the lock. Users should not modify this key or the lock may exhibit
// undefined behavior.
bytes key = 2;
}

message UnlockRequest {
// key is the lock ownership key granted by Lock.
bytes key = 1;
}

message UnlockResponse {
etcdserverpb.ResponseHeader header = 1;
}
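Through the grpc-gateway these routes accept JSON over HTTP, with bytes fields base64-encoded per the proto3 JSON mapping. A hedged sketch of calling the lock route from Go; the gateway address is a placeholder, and the usual net/http, encoding/base64, strings, and io/ioutil imports are assumed:

name := base64.StdEncoding.EncodeToString([]byte("my-lock")) // bytes fields travel as base64
resp, err := http.Post("http://localhost:8080/v3alpha/lock/lock",
	"application/json", strings.NewReader(`{"name":"`+name+`"}`))
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
out, _ := ioutil.ReadAll(resp.Body)
fmt.Println(string(out)) // on success, carries the base64 ownership key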
14
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
generated
vendored
@@ -16,6 +16,7 @@ package v3rpc

import (
"crypto/tls"
"math"

"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -24,11 +25,17 @@ import (
"google.golang.org/grpc/grpclog"
)

const (
grpcOverheadBytes = 512 * 1024
maxStreams = math.MaxUint32
maxSendBytes = math.MaxInt32
)

func init() {
grpclog.SetLogger(plog)
}

func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil {
@@ -36,8 +43,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
}
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
grpcServer := grpc.NewServer(append(opts, gopts...)...)

grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))

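The new variadic gopts parameter is appended after etcd's own options, so embedding callers can thread extra grpc.ServerOption values through without patching this file. A one-line fragment; extraOpts, etcdSrv, and tlsConfig are assumed to exist in the caller:

srv := v3rpc.Server(etcdSrv, tlsConfig, extraOpts...)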
4
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
generated
vendored
@@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
return nil, rpctypes.ErrGRPCNotCapable
}

md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
@@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
return rpctypes.ErrGRPCNotCapable
}

md, ok := metadata.FromContext(ss.Context())
md, ok := metadata.FromIncomingContext(ss.Context())
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {

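Clients trigger this leader check by attaching the require-leader metadata key shown above, which clientv3 exposes as a context helper. A fragment; cli is assumed to be an existing *clientv3.Client:

ctx := clientv3.WithRequireLeader(context.Background())
// Fails fast with a no-leader error if this member is partitioned from the leader.
resp, err := cli.Get(ctx, "foo")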
10
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
generated
vendored
@@ -134,6 +134,12 @@ func checkPutRequest(r *pb.PutRequest) error {
if len(r.Key) == 0 {
return rpctypes.ErrGRPCEmptyKey
}
if r.IgnoreValue && len(r.Value) != 0 {
return rpctypes.ErrGRPCValueProvided
}
if r.IgnoreLease && r.Lease != 0 {
return rpctypes.ErrGRPCLeaseProvided
}
return nil
}

@@ -246,8 +252,8 @@ func checkRequestOp(u *pb.RequestOp) error {
return checkDeleteRequest(uv.RequestDeleteRange)
}
default:
// empty op
return nil
// empty op / nil entry
return rpctypes.ErrGRPCKeyNotFound
}
return nil
}

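The new checks reject a Put that sets IgnoreValue while also carrying a value, or IgnoreLease while also carrying a lease. From clientv3 the valid forms look like this fragment (key names are placeholders):

// Keep the existing value; useful to refresh metadata such as the attached lease.
_, err := cli.Put(ctx, "k", "", clientv3.WithIgnoreValue())
// Update the value but keep whatever lease is already attached.
_, err = cli.Put(ctx, "k", "v", clientv3.WithIgnoreLease())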
31
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
generated
vendored
@@ -18,6 +18,7 @@ import (
"io"

"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"golang.org/x/net/context"
@@ -53,20 +54,45 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques

func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
resp, err := ls.le.LeaseTimeToLive(ctx, rr)
if err != nil {
if err != nil && err != lease.ErrLeaseNotFound {
return nil, togRPCError(err)
}
if err == lease.ErrLeaseNotFound {
resp = &pb.LeaseTimeToLiveResponse{
Header: &pb.ResponseHeader{},
ID: rr.ID,
TTL: -1,
}
}
ls.hdr.fill(resp.Header)
return resp, nil
}

func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
errc := make(chan error, 1)
go func() {
errc <- ls.leaseKeepAlive(stream)
}()
select {
case err = <-errc:
case <-stream.Context().Done():
// the only server-side cancellation is noleader for now.
err = stream.Context().Err()
if err == context.Canceled {
err = rpctypes.ErrGRPCNoLeader
}
}
return err
}

func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
for {
req, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
return err
}

@@ -92,6 +118,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
resp.TTL = ttl
err = stream.Send(resp)
if err != nil {
plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
return err
}
}

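After this change, querying the TTL of a missing lease is no longer an error: the response reports TTL == -1. A client-side fragment; cli and leaseID are assumed to exist:

ttlResp, err := cli.TimeToLive(context.Background(), leaseID)
if err != nil {
	log.Fatal(err)
}
if ttlResp.TTL == -1 {
	fmt.Println("lease already expired or revoked")
}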
3
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
generated
vendored
@@ -47,6 +47,7 @@ type RaftStatusGetter interface {
}

type AuthGetter interface {
AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
AuthStore() auth.AuthStore
}

@@ -152,7 +153,7 @@ type authMaintenanceServer struct {
}

func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx)
authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
if err != nil {
return err
}

36
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
generated
vendored
@@ -48,21 +48,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
|
||||
|
||||
now := time.Now()
|
||||
m := membership.NewMember("", urls, "", &now)
|
||||
if err = cs.server.AddMember(ctx, *m); err != nil {
|
||||
return nil, togRPCError(err)
|
||||
membs, merr := cs.server.AddMember(ctx, *m)
|
||||
if merr != nil {
|
||||
return nil, togRPCError(merr)
|
||||
}
|
||||
|
||||
return &pb.MemberAddResponse{
|
||||
Header: cs.header(),
|
||||
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
|
||||
Header: cs.header(),
|
||||
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
|
||||
Members: membersToProtoMembers(membs),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
|
||||
if err := cs.server.RemoveMember(ctx, r.ID); err != nil {
|
||||
membs, err := cs.server.RemoveMember(ctx, r.ID)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return &pb.MemberRemoveResponse{Header: cs.header()}, nil
|
||||
return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
|
||||
@@ -70,15 +73,23 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
|
||||
ID: types.ID(r.ID),
|
||||
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
|
||||
}
|
||||
if err := cs.server.UpdateMember(ctx, m); err != nil {
|
||||
membs, err := cs.server.UpdateMember(ctx, m)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return &pb.MemberUpdateResponse{Header: cs.header()}, nil
|
||||
return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
|
||||
membs := cs.cluster.Members()
|
||||
membs := membersToProtoMembers(cs.cluster.Members())
|
||||
return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) header() *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
|
||||
}
|
||||
|
||||
func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
|
||||
protoMembs := make([]*pb.Member, len(membs))
|
||||
for i := range membs {
|
||||
protoMembs[i] = &pb.Member{
|
||||
@@ -88,10 +99,5 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest
|
||||
ClientURLs: membs[i].ClientURLs,
|
||||
}
|
||||
}
|
||||
|
||||
return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) header() *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
|
||||
return protoMembs
|
||||
}
|
||||
|
||||
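With this change the MemberAdd/MemberRemove/MemberUpdate responses carry the complete post-change member list, so a follow-up MemberList round trip is unnecessary. A hedged sketch against the vendored clientv3 (the peer URL is illustrative):

package sketch

import (
    "context"
    "fmt"

    "github.com/coreos/etcd/clientv3"
)

// addAndList shows that the add response now includes the whole membership.
func addAndList(ctx context.Context, cli *clientv3.Client) error {
    resp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
    if err != nil {
        return err
    }
    for _, m := range resp.Members { // full list after the add
        fmt.Printf("%x %v\n", m.ID, m.PeerURLs)
    }
    return nil
}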
1
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD
generated
vendored
@@ -12,6 +12,7 @@ go_library(
    deps = [
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/google.golang.org/grpc/codes:go_default_library",
        "//vendor/google.golang.org/grpc/status:go_default_library",
    ],
)
47
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
@@ -17,16 +17,20 @@ package rpctypes
import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

var (
    // server-side error
    ErrGRPCEmptyKey     = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
    ErrGRPCTooManyOps   = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
    ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
    ErrGRPCCompacted    = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
    ErrGRPCFutureRev    = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
    ErrGRPCNoSpace      = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
    ErrGRPCEmptyKey      = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
    ErrGRPCKeyNotFound   = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found")
    ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided")
    ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided")
    ErrGRPCTooManyOps    = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
    ErrGRPCDuplicateKey  = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
    ErrGRPCCompacted     = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
    ErrGRPCFutureRev     = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
    ErrGRPCNoSpace       = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")

    ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
    ErrGRPCLeaseExist    = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
@@ -53,6 +57,7 @@ var (
    ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
    ErrGRPCAuthNotEnabled       = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
    ErrGRPCInvalidAuthToken     = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
    ErrGRPCInvalidAuthMgmt      = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management")

    ErrGRPCNoLeader   = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
    ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
@@ -63,7 +68,11 @@ var (
    ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster")

    errStringToError = map[string]error{
        grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
        grpc.ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
        grpc.ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
        grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
        grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,

        grpc.ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
        grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
        grpc.ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
@@ -95,6 +104,7 @@ var (
        grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
        grpc.ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
        grpc.ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
        grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,

        grpc.ErrorDesc(ErrGRPCNoLeader):   ErrGRPCNoLeader,
        grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
@@ -106,12 +116,15 @@ var (
    }

    // client-side error
    ErrEmptyKey     = Error(ErrGRPCEmptyKey)
    ErrTooManyOps   = Error(ErrGRPCTooManyOps)
    ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
    ErrCompacted    = Error(ErrGRPCCompacted)
    ErrFutureRev    = Error(ErrGRPCFutureRev)
    ErrNoSpace      = Error(ErrGRPCNoSpace)
    ErrEmptyKey      = Error(ErrGRPCEmptyKey)
    ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
    ErrValueProvided = Error(ErrGRPCValueProvided)
    ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
    ErrTooManyOps    = Error(ErrGRPCTooManyOps)
    ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
    ErrCompacted     = Error(ErrGRPCCompacted)
    ErrFutureRev     = Error(ErrGRPCFutureRev)
    ErrNoSpace       = Error(ErrGRPCNoSpace)

    ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
    ErrLeaseExist    = Error(ErrGRPCLeaseExist)
@@ -138,6 +151,7 @@ var (
    ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
    ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
    ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
    ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)

    ErrNoLeader   = Error(ErrGRPCNoLeader)
    ErrNotCapable = Error(ErrGRPCNotCapable)
@@ -175,3 +189,10 @@ func Error(err error) error {
    }
    return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}

func ErrorDesc(err error) string {
    if s, ok := status.FromError(err); ok {
        return s.Message()
    }
    return err.Error()
}
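The new ErrorDesc helper prefers grpc-go's status API over the deprecated grpc.ErrorDesc. A standalone sketch of the same pattern for callers:

package main

import (
    "fmt"

    "google.golang.org/grpc/status"
)

// describe mirrors rpctypes.ErrorDesc above: pull the server message out of
// a gRPC status error, falling back to Error() for plain errors.
func describe(err error) string {
    if s, ok := status.FromError(err); ok {
        return s.Message() // e.g. "etcdserver: key not found"
    }
    return err.Error()
}

func main() {
    fmt.Println(describe(fmt.Errorf("plain error"))) // "plain error"
}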
6
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
generated
vendored
@@ -42,8 +42,6 @@ func togRPCError(err error) error {
        return rpctypes.ErrGRPCCompacted
    case mvcc.ErrFutureRev:
        return rpctypes.ErrGRPCFutureRev
    case lease.ErrLeaseNotFound:
        return rpctypes.ErrGRPCLeaseNotFound
    case etcdserver.ErrRequestTooLarge:
        return rpctypes.ErrGRPCRequestTooLarge
    case etcdserver.ErrNoSpace:
@@ -63,6 +61,8 @@ func togRPCError(err error) error {
        return rpctypes.ErrGRPCTimeoutDueToConnectionLost
    case etcdserver.ErrUnhealthy:
        return rpctypes.ErrGRPCUnhealthy
    case etcdserver.ErrKeyNotFound:
        return rpctypes.ErrGRPCKeyNotFound

    case lease.ErrLeaseNotFound:
        return rpctypes.ErrGRPCLeaseNotFound
@@ -95,6 +95,8 @@ func togRPCError(err error) error {
        return rpctypes.ErrGRPCAuthNotEnabled
    case auth.ErrInvalidAuthToken:
        return rpctypes.ErrGRPCInvalidAuthToken
    case auth.ErrInvalidAuthMgmt:
        return rpctypes.ErrGRPCInvalidAuthMgmt
    default:
        return grpc.Errorf(codes.Unknown, err.Error())
    }
47
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
generated
vendored
@@ -21,6 +21,7 @@ import (

    "golang.org/x/net/context"

    "github.com/coreos/etcd/auth"
    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -33,6 +34,8 @@ type watchServer struct {
    memberID  int64
    raftTimer etcdserver.RaftTimer
    watchable mvcc.WatchableKV

    ag AuthGetter
}

func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
@@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
        memberID:  int64(s.ID()),
        raftTimer: s,
        watchable: s.Watchable(),
        ag:        s,
    }
}

@@ -101,6 +105,8 @@ type serverWatchStream struct {

    // wg waits for the send loop to complete
    wg sync.WaitGroup

    ag AuthGetter
}

func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
@@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
        progress: make(map[mvcc.WatchID]bool),
        prevKV:   make(map[mvcc.WatchID]bool),
        closec:   make(chan struct{}),

        ag: ws.ag,
    }

    sws.wg.Add(1)
@@ -133,6 +141,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
    // deadlock when calling sws.close().
    go func() {
        if rerr := sws.recvLoop(); rerr != nil {
            plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
            errc <- rerr
        }
    }()
@@ -150,6 +159,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
    return err
}

func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
    authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
    if err != nil {
        return false
    }
    if authInfo == nil {
        // if auth is enabled, IsRangePermitted() can cause an error
        authInfo = &auth.AuthInfo{}
    }

    return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}

func (sws *serverWatchStream) recvLoop() error {
    for {
        req, err := sws.gRPCStream.Recv()
@@ -171,10 +193,32 @@ func (sws *serverWatchStream) recvLoop() error {
                // \x00 is the smallest key
                creq.Key = []byte{0}
            }
            if len(creq.RangeEnd) == 0 {
                // force nil since watchstream.Watch distinguishes
                // between nil and []byte{} for single key / >=
                creq.RangeEnd = nil
            }
            if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
                // support >= key queries
                creq.RangeEnd = []byte{}
            }

            if !sws.isWatchPermitted(creq) {
                wr := &pb.WatchResponse{
                    Header:       sws.newResponseHeader(sws.watchStream.Rev()),
                    WatchId:      -1,
                    Canceled:     true,
                    Created:      true,
                    CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
                }

                select {
                case sws.ctrlStream <- wr:
                case <-sws.closec:
                }
                return nil
            }

            filters := FiltersFromRequest(creq)

            wsrev := sws.watchStream.Rev()
@@ -294,6 +338,7 @@ func (sws *serverWatchStream) sendLoop() {

            mvcc.ReportEventReceived(len(evs))
            if err := sws.gRPCStream.Send(wr); err != nil {
                plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
                return
            }

@@ -310,6 +355,7 @@ func (sws *serverWatchStream) sendLoop() {
            }

            if err := sws.gRPCStream.Send(c); err != nil {
                plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
                return
            }

@@ -325,6 +371,7 @@ func (sws *serverWatchStream) sendLoop() {
                for _, v := range pending[wid] {
                    mvcc.ReportEventReceived(len(v.Events))
                    if err := sws.gRPCStream.Send(v); err != nil {
                        plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
                        return
                    }
                }
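A watch creation that fails the permission check now comes back as a single created-and-canceled WatchResponse carrying cancel_reason, instead of being silently dropped. A hedged client-side sketch, assuming the vendored clientv3 surfaces the cancellation through WatchResponse.Err():

package sketch

import (
    "context"
    "log"

    "github.com/coreos/etcd/clientv3"
)

// watchGuarded watches a prefix the caller may not be allowed to read.
func watchGuarded(ctx context.Context, cli *clientv3.Client) {
    for wresp := range cli.Watch(ctx, "secret/", clientv3.WithPrefix()) {
        if wresp.Canceled {
            // e.g. the permission-denied reason sent by the server above
            log.Printf("watch canceled by server: %v", wresp.Err())
            return
        }
        for _, ev := range wresp.Events {
            log.Printf("%s %q", ev.Type, ev.Kv.Key)
        }
    }
}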
292
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
@@ -16,7 +16,6 @@ package etcdserver

import (
    "bytes"
    "fmt"
    "sort"
    "time"

@@ -30,11 +29,6 @@ import (
)

const (
    // noTxn is an invalid txn ID.
    // To apply with independent Range, Put, Delete, you can pass noTxn
    // to apply functions instead of a valid txn ID.
    noTxn = -1

    warnApplyDuration = 100 * time.Millisecond
)

@@ -51,9 +45,9 @@ type applyResult struct {
type applierV3 interface {
    Apply(r *pb.InternalRaftRequest) *applyResult

    Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error)
    Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error)
    DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
    Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
    Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
    DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
    Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
    Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)

@@ -99,11 +93,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
    // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
    switch {
    case r.Range != nil:
        ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range)
        ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
    case r.Put != nil:
        ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put)
        ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
    case r.DeleteRange != nil:
        ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange)
        ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
    case r.Txn != nil:
        ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
    case r.Compaction != nil:
@@ -152,106 +146,87 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
    return ar
}

func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
    resp := &pb.PutResponse{}
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
    resp = &pb.PutResponse{}
    resp.Header = &pb.ResponseHeader{}
    var (
        rev int64
        err error
    )

    var rr *mvcc.RangeResult
    if p.PrevKv {
        if txnID != noTxn {
            rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{})
            if err != nil {
                return nil, err
            }
        } else {
            rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{})
            if err != nil {
                return nil, err
            }
        }
    }

    if txnID != noTxn {
        rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease))
        if err != nil {
            return nil, err
        }
    } else {
        leaseID := lease.LeaseID(p.Lease)
    val, leaseID := p.Value, lease.LeaseID(p.Lease)
    if txn == nil {
        if leaseID != lease.NoLease {
            if l := a.s.lessor.Lookup(leaseID); l == nil {
                return nil, lease.ErrLeaseNotFound
            }
        }
        rev = a.s.KV().Put(p.Key, p.Value, leaseID)
        txn = a.s.KV().Write()
        defer txn.End()
    }
    resp.Header.Revision = rev
    if rr != nil && len(rr.KVs) != 0 {
        resp.PrevKv = &rr.KVs[0]

    var rr *mvcc.RangeResult
    if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
        rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
        if err != nil {
            return nil, err
        }
    }
    if p.IgnoreValue || p.IgnoreLease {
        if rr == nil || len(rr.KVs) == 0 {
            // ignore_{lease,value} flag expects previous key-value pair
            return nil, ErrKeyNotFound
        }
    }
    if p.IgnoreValue {
        val = rr.KVs[0].Value
    }
    if p.IgnoreLease {
        leaseID = lease.LeaseID(rr.KVs[0].Lease)
    }
    if p.PrevKv {
        if rr != nil && len(rr.KVs) != 0 {
            resp.PrevKv = &rr.KVs[0]
        }
    }

    resp.Header.Revision = txn.Put(p.Key, val, leaseID)
    return resp, nil
}

func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
    resp := &pb.DeleteRangeResponse{}
    resp.Header = &pb.ResponseHeader{}

    var (
        n   int64
        rev int64
        err error
    )
    if txn == nil {
        txn = a.s.kv.Write()
        defer txn.End()
    }

    if isGteRange(dr.RangeEnd) {
        dr.RangeEnd = []byte{}
    }

    var rr *mvcc.RangeResult
    if dr.PrevKv {
        if txnID != noTxn {
            rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
            if err != nil {
                return nil, err
            }
        } else {
            rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
            if err != nil {
                return nil, err
            }
        }
    }

    if txnID != noTxn {
        n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd)
        rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
        if err != nil {
            return nil, err
        }
    } else {
        n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd)
    }

    resp.Deleted = n
    if rr != nil {
        for i := range rr.KVs {
            resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
        if rr != nil {
            for i := range rr.KVs {
                resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
            }
        }
    }
    resp.Header.Revision = rev

    resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd)
    return resp, nil
}

func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
    resp := &pb.RangeResponse{}
    resp.Header = &pb.ResponseHeader{}

    var (
        rr  *mvcc.RangeResult
        err error
    )
    if txn == nil {
        txn = a.s.kv.Read()
        defer txn.End()
    }

    if isGteRange(r.RangeEnd) {
        r.RangeEnd = []byte{}
@@ -275,16 +250,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
        Count: r.CountOnly,
    }

    if txnID != noTxn {
        rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro)
        if err != nil {
            return nil, err
        }
    } else {
        rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro)
        if err != nil {
            return nil, err
        }
    rr, err := txn.Range(r.Key, r.RangeEnd, ro)
    if err != nil {
        return nil, err
    }

    if r.MaxModRevision != 0 {
@@ -350,61 +318,64 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
}

func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
    ok := true
    for _, c := range rt.Compare {
        if _, ok = a.applyCompare(c); !ok {
            break
    isWrite := !isTxnReadonly(rt)
    txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())

    reqs, ok := a.compareToOps(txn, rt)
    if isWrite {
        if err := a.checkRequestPut(txn, reqs); err != nil {
            txn.End()
            return nil, err
        }
    }

    var reqs []*pb.RequestOp
    if ok {
        reqs = rt.Success
    } else {
        reqs = rt.Failure
    }

    if err := a.checkRequestLeases(reqs); err != nil {
    if err := checkRequestRange(txn, reqs); err != nil {
        txn.End()
        return nil, err
    }
    if err := a.checkRequestRange(reqs); err != nil {
        return nil, err
    }

    // When executing the operations of txn, we need to hold the txn lock.
    // So the reader will not see any intermediate results.
    txnID := a.s.KV().TxnBegin()

    resps := make([]*pb.ResponseOp, len(reqs))
    txnResp := &pb.TxnResponse{
        Responses: resps,
        Succeeded: ok,
        Header:    &pb.ResponseHeader{},
    }

    // When executing mutable txn ops, etcd must hold the txn lock so
    // readers do not see any intermediate results. Since writes are
    // serialized on the raft loop, the revision in the read view will
    // be the revision of the write txn.
    if isWrite {
        txn.End()
        txn = a.s.KV().Write()
    }
    for i := range reqs {
        resps[i] = a.applyUnion(txnID, reqs[i])
        resps[i] = a.applyUnion(txn, reqs[i])
    }

    err := a.s.KV().TxnEnd(txnID)
    if err != nil {
        panic(fmt.Sprint("unexpected error when closing txn", txnID))
    rev := txn.Rev()
    if len(txn.Changes()) != 0 {
        rev++
    }
    txn.End()

    txnResp := &pb.TxnResponse{}
    txnResp.Header = &pb.ResponseHeader{}
    txnResp.Header.Revision = a.s.KV().Rev()
    txnResp.Responses = resps
    txnResp.Succeeded = ok
    txnResp.Header.Revision = rev
    return txnResp, nil
}

// applyCompare applies the compare request.
// It returns the revision at which the comparison happens. If the comparison
// succeeds, it returns true. Otherwise it returns false.
func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
    rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{})
    rev := rr.Rev

    if err != nil {
        if err == mvcc.ErrTxnIDMismatch {
            panic("unexpected txn ID mismatch error")
func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
    for _, c := range rt.Compare {
        if !applyCompare(rv, c) {
            return rt.Failure, false
        }
        return rev, false
    }
    return rt.Success, true
}

// applyCompare applies the compare request.
// If the comparison succeeds, it returns true. Otherwise, returns false.
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
    rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
    if err != nil {
        return false
    }
    var ckv mvccpb.KeyValue
    if len(rr.KVs) != 0 {
@@ -416,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
        // We can treat non-existence as the empty set explicitly, such that
        // even a key with a value of length 0 bytes is still a real key
        // that was written that way
        return rev, false
        return false
    }
}

@@ -448,30 +419,22 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {

    switch c.Result {
    case pb.Compare_EQUAL:
        if result != 0 {
            return rev, false
        }
        return result == 0
    case pb.Compare_NOT_EQUAL:
        if result == 0 {
            return rev, false
        }
        return result != 0
    case pb.Compare_GREATER:
        if result != 1 {
            return rev, false
        }
        return result > 0
    case pb.Compare_LESS:
        if result != -1 {
            return rev, false
        }
        return result < 0
    }
    return rev, true
    return true
}

func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp {
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
    switch tv := union.Request.(type) {
    case *pb.RequestOp_RequestRange:
        if tv.RequestRange != nil {
            resp, err := a.Range(txnID, tv.RequestRange)
            resp, err := a.Range(txn, tv.RequestRange)
            if err != nil {
                plog.Panicf("unexpected error during txn: %v", err)
            }
@@ -479,7 +442,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
        }
    case *pb.RequestOp_RequestPut:
        if tv.RequestPut != nil {
            resp, err := a.Put(txnID, tv.RequestPut)
            resp, err := a.Put(txn, tv.RequestPut)
            if err != nil {
                plog.Panicf("unexpected error during txn: %v", err)
            }
@@ -487,7 +450,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp
        }
    case *pb.RequestOp_RequestDeleteRange:
        if tv.RequestDeleteRange != nil {
            resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
            resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
            if err != nil {
                plog.Panicf("unexpected error during txn: %v", err)
            }
@@ -588,7 +551,7 @@ type applierV3Capped struct {
// with Puts so that the number of keys in the store is capped.
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }

func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
    return nil, ErrNoSpace
}

@@ -617,7 +580,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
}

func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
    ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
    ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
    resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
    if resp != nil {
        resp.Header = newHeader(a.s)
@@ -738,9 +701,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
    return &quotaApplierV3{app, NewBackendQuota(s)}
}

func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
    ok := a.q.Available(p)
    resp, err := a.applierV3.Put(txnID, p)
    resp, err := a.applierV3.Put(txn, p)
    if err == nil && !ok {
        err = ErrNoSpace
    }
@@ -804,14 +767,27 @@ func (s *kvSortByValue) Less(i, j int) bool {
    return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}

func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
    for _, requ := range reqs {
        tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
        if !ok {
            continue
        }
        preq := tv.RequestPut
        if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease {
        if preq == nil {
            continue
        }
        if preq.IgnoreValue || preq.IgnoreLease {
            // expects previous key-value, error if not exist
            rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
            if err != nil {
                return err
            }
            if rr == nil || len(rr.KVs) == 0 {
                return ErrKeyNotFound
            }
        }
        if lease.LeaseID(preq.Lease) == lease.NoLease {
            continue
        }
        if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
@@ -821,7 +797,7 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
    return nil
}

func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
    for _, requ := range reqs {
        tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
        if !ok {
@@ -832,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
            continue
        }

        if greq.Revision > a.s.KV().Rev() {
        if greq.Revision > rv.Rev() {
            return mvcc.ErrFutureRev
        }
        if greq.Revision < a.s.KV().FirstRev() {
        if greq.Revision < rv.FirstRev() {
            return mvcc.ErrCompacted
        }
    }
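The rewritten Txn above evaluates guards under a cheap read view and reopens as a write transaction only when the chosen branch mutates. A generic sketch of that read-then-promote pattern; the interfaces below are simplified stand-ins for mvcc's real types, shown only to make the control flow explicit:

package sketch

// TxnRead and TxnWrite approximate the mvcc interfaces used above.
type TxnRead interface {
    Rev() int64
    End()
}

type TxnWrite interface {
    TxnRead
    Put(key, val []byte) int64
}

// applyTxn mirrors applierV3backend.Txn: guards run under a read view, and
// the write txn (and its lock) is taken only for a mutating branch, so
// read-only txns never serialize behind writers.
func applyTxn(read func() TxnRead, write func() TxnWrite, isWrite bool, run func(TxnRead)) int64 {
    txn := read()
    if isWrite {
        txn.End()      // drop the read view...
        txn = write()  // ...and serialize behind the write txn
    }
    run(txn)
    rev := txn.Rev()
    txn.End()
    return rev
}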
13
vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
generated
vendored
@@ -19,6 +19,7 @@ import (

    "github.com/coreos/etcd/auth"
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/mvcc"
)

type authApplierV3 struct {
@@ -58,7 +59,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
    return ret
}

func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
    if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
        return nil, err
    }
@@ -68,17 +69,17 @@ func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, er
            return nil, err
        }
    }
    return aa.applierV3.Put(txnID, r)
    return aa.applierV3.Put(txn, r)
}

func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
    if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
        return nil, err
    }
    return aa.applierV3.Range(txnID, r)
    return aa.applierV3.Range(txn, r)
}

func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
    if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
        return nil, err
    }
@@ -89,7 +90,7 @@ func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb
        }
    }

    return aa.applierV3.DeleteRange(txnID, r)
    return aa.applierV3.DeleteRange(txn, r)
}

func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
81
vendor/github.com/coreos/etcd/etcdserver/backend.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "fmt"
    "os"
    "time"

    "github.com/coreos/etcd/lease"
    "github.com/coreos/etcd/mvcc"
    "github.com/coreos/etcd/mvcc/backend"
    "github.com/coreos/etcd/raft/raftpb"
    "github.com/coreos/etcd/snap"
)

func newBackend(cfg *ServerConfig) backend.Backend {
    bcfg := backend.DefaultBackendConfig()
    bcfg.Path = cfg.backendPath()
    if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
        // permit 10% excess over quota for disarm
        bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
    }
    return backend.New(bcfg)
}

// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
    snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
    if err != nil {
        return nil, fmt.Errorf("database snapshot file path error: %v", err)
    }
    if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
        return nil, fmt.Errorf("rename snapshot file error: %v", err)
    }
    return openBackend(cfg), nil
}

// openBackend returns a backend using the current etcd db.
func openBackend(cfg *ServerConfig) backend.Backend {
    fn := cfg.backendPath()
    beOpened := make(chan backend.Backend)
    go func() {
        beOpened <- newBackend(cfg)
    }()
    select {
    case be := <-beOpened:
        return be
    case <-time.After(time.Second):
        plog.Warningf("another etcd process is using %q and holds the file lock.", fn)
        plog.Warningf("waiting for it to exit before starting...")
    }
    return <-beOpened
}

// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
// before updating the backend db after persisting raft snapshot to disk,
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
// case, replace the db with the snapshot db sent by the leader.
func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
    var cIndex consistentIndex
    kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
    defer kv.Close()
    if snapshot.Metadata.Index <= kv.ConsistentIndex() {
        return oldbe, nil
    }
    oldbe.Close()
    return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
}
10
vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
generated
vendored
@@ -23,7 +23,6 @@ import (
    "time"

    "github.com/coreos/etcd/etcdserver/membership"
    "github.com/coreos/etcd/pkg/httputil"
    "github.com/coreos/etcd/pkg/types"
    "github.com/coreos/etcd/version"
    "github.com/coreos/go-semver/semver"
@@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions,
            plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
            continue
        }
        // etcd 2.0 does not have version endpoint on peer url.
        if resp.StatusCode == http.StatusNotFound {
            httputil.GracefulClose(resp)
            return &version.Versions{
                Server:  "2.0.0",
                Cluster: "2.0.0",
            }, nil
        }

        var b []byte
        b, err = ioutil.ReadAll(resp.Body)
        resp.Body.Close()
7
vendor/github.com/coreos/etcd/etcdserver/config.go
generated
vendored
@@ -55,10 +55,15 @@ type ServerConfig struct {
    AutoCompactionRetention int
    QuotaBackendBytes       int64

    // MaxRequestBytes is the maximum request size to send over raft.
    MaxRequestBytes uint

    StrictReconfigCheck bool

    // ClientCertAuthEnabled is true when cert has been signed by the client CA.
    ClientCertAuthEnabled bool

    AuthToken string
}

// VerifyBootstrap sanity-checks the initial config for bootstrap case
@@ -198,3 +203,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration {
    }
    return time.Second
}

func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
1
vendor/github.com/coreos/etcd/etcdserver/errors.go
generated
vendored
@@ -33,6 +33,7 @@ var (
    ErrNoSpace         = errors.New("etcdserver: no space")
    ErrTooManyRequests = errors.New("etcdserver: too many requests")
    ErrUnhealthy       = errors.New("etcdserver: unhealthy cluster")
    ErrKeyNotFound     = errors.New("etcdserver: key not found")
)

type DiscoveryError struct {
11
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD
generated
vendored
@@ -16,7 +16,6 @@ go_library(
        "etcdserver.pb.go",
        "raft_internal.pb.go",
        "rpc.pb.go",
        "rpc.pb.gw.go",
    ],
    importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb",
    visibility = ["//visibility:public"],
@@ -24,12 +23,9 @@ go_library(
        "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library",
        "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/google.golang.org/grpc/codes:go_default_library",
        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
    ],
)

@@ -42,7 +38,10 @@ filegroup(

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    srcs = [
        ":package-srcs",
        "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
2
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
@@ -1018,7 +1018,7 @@ func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }

var fileDescriptorEtcdserver = []byte{
    // 380 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
    0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
    0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
    0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
33
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD
generated
vendored
Normal file
@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["rpc.pb.gw.go"],
    importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb/gw",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library",
        "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/grpc:go_default_library",
        "//vendor/google.golang.org/grpc/codes:go_default_library",
        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
        "//vendor/google.golang.org/grpc/status:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
File diff suppressed because it is too large
2
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
@@ -2038,7 +2038,7 @@ func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftIntern

var fileDescriptorRaftInternal = []byte{
    // 837 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40,
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40,
    0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f,
    0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c,
    0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2,
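In both regenerated descriptors only bytes 4-7 of the gzip stream change, from 0x00 0x09 0x6e 0x88 to all zeros. Those bytes are the gzip header's MTIME field, so zeroing them makes the protoc output byte-for-byte reproducible across regenerations. A small sketch verifying the field with the standard library (the ten bytes below are copied from the new descriptor above):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
)

func main() {
    // First ten bytes of the regenerated descriptor: magic, method, flags,
    // MTIME (bytes 4-7, now zero), XFL, OS.
    hdr := []byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff}
    zr, err := gzip.NewReader(bytes.NewReader(hdr))
    if err != nil {
        panic(err)
    }
    // A zero MTIME is reported as the zero time: no timestamp is embedded,
    // so regenerating the file yields identical bytes.
    fmt.Println(zr.ModTime.IsZero()) // true
}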
1517
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
File diff suppressed because it is too large
32
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
generated
vendored
@@ -352,11 +352,12 @@ message RangeRequest {
  bytes key = 1;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If the range_end is one bit larger than the given key,
  // then the range requests get the all keys with the prefix (the given key).
  // If both key and range_end are '\0', then range requests returns all keys.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all keys.
  bytes range_end = 2;
  // limit is a limit on the number of keys returned for the request.
  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
  // it is treated as no limit.
  int64 limit = 3;
  // revision is the point-in-time of the key-value store to use for the range.
  // If revision is less or equal to zero, the range is over the newest key-value store.
@@ -423,6 +424,14 @@ message PutRequest {
  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
  // The previous key-value pair will be returned in the put response.
  bool prev_kv = 4;

  // If ignore_value is set, etcd updates the key using its current value.
  // Returns an error if the key does not exist.
  bool ignore_value = 5;

  // If ignore_lease is set, etcd updates the key using its current lease.
  // Returns an error if the key does not exist.
  bool ignore_lease = 6;
}

message PutResponse {
@@ -436,13 +445,13 @@ message DeleteRangeRequest {
  bytes key = 1;
  // range_end is the key following the last key to delete for the range [key, range_end).
  // If range_end is not given, the range is defined to contain only the key argument.
  // If range_end is one bit larger than the given key, then the range is all
  // the all keys with the prefix (the given key).
  // If range_end is one bit larger than the given key, then the range is all the keys
  // with the prefix (the given key).
  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
  bytes range_end = 2;

  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
  // The previous key-value pairs will be returned in the delte response.
  // The previous key-value pairs will be returned in the delete response.
  bool prev_kv = 3;
}

@@ -645,6 +654,9 @@ message WatchResponse {
  // watcher with the same start_revision again.
  int64 compact_revision = 5;

  // cancel_reason indicates the reason for canceling the watcher.
  string cancel_reason = 6;

  repeated mvccpb.Event events = 11;
}

@@ -725,6 +737,8 @@ message MemberAddResponse {
  ResponseHeader header = 1;
  // member is the member information for the added member.
  Member member = 2;
  // members is a list of all members after adding the new member.
  repeated Member members = 3;
}

message MemberRemoveRequest {
@@ -734,6 +748,8 @@ message MemberRemoveRequest {

message MemberRemoveResponse {
  ResponseHeader header = 1;
  // members is a list of all members after removing the member.
  repeated Member members = 2;
}

message MemberUpdateRequest {
@@ -745,6 +761,8 @@ message MemberUpdateRequest {

message MemberUpdateResponse{
  ResponseHeader header = 1;
  // members is a list of all members after updating the member.
  repeated Member members = 2;
}

message MemberListRequest {
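The reworded range_end comment defines a prefix range as [key, key+1). A sketch of computing that end key in Go; the helper name is illustrative, though clientv3's WithPrefix performs the equivalent computation:

package main

import "fmt"

// prefixRangeEnd returns the range_end that makes [prefix, range_end) cover
// every key with the given prefix, per the comment above: "aa"+1 == "ab",
// "a\xff"+1 == "b".
func prefixRangeEnd(prefix []byte) []byte {
    end := make([]byte, len(prefix))
    copy(end, prefix)
    for i := len(end) - 1; i >= 0; i-- {
        if end[i] < 0xff {
            end[i]++
            return end[:i+1]
        }
    }
    // prefix is all 0xff: fall back to '\0', i.e. "every key >= prefix"
    return []byte{0}
}

func main() {
    fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
    fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}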
1
vendor/github.com/coreos/etcd/etcdserver/membership/BUILD
generated
vendored
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "cluster.go",
        "doc.go",
        "errors.go",
        "member.go",
        "store.go",
2
vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
generated
vendored
@@ -178,7 +178,7 @@ func (c *RaftCluster) String() string {
    fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
    var ids []string
    for id := range c.removed {
        ids = append(ids, fmt.Sprintf("%s", id))
        ids = append(ids, id.String())
    }
    fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
    return b.String()
16
vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package membership describes individual etcd members and clusters of members.
package membership
6
vendor/github.com/coreos/etcd/etcdserver/membership/store.go
generated
vendored
@@ -36,7 +36,7 @@ const (

var (
    membersBucketName        = []byte("members")
    membersRemovedBuckedName = []byte("members_removed")
    membersRemovedBucketName = []byte("members_removed")
    clusterBucketName        = []byte("cluster")

    StoreMembersPrefix = path.Join(storePrefix, "members")
@@ -62,7 +62,7 @@ func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
    tx := be.BatchTx()
    tx.Lock()
    tx.UnsafeDelete(membersBucketName, mkey)
    tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed"))
    tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
    tx.Unlock()
}

@@ -164,7 +164,7 @@ func mustCreateBackendBuckets(be backend.Backend) {
    tx.Lock()
    defer tx.Unlock()
    tx.UnsafeCreateBucket(membersBucketName)
    tx.UnsafeCreateBucket(membersRemovedBuckedName)
    tx.UnsafeCreateBucket(membersRemovedBucketName)
    tx.UnsafeCreateBucket(clusterBucketName)
}
7
vendor/github.com/coreos/etcd/etcdserver/metrics.go
generated
vendored
@@ -58,6 +58,12 @@ var (
        Name:      "proposals_failed_total",
        Help:      "The total number of failed proposals seen.",
    })
    leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd_debugging",
        Subsystem: "server",
        Name:      "lease_expired_total",
        Help:      "The total number of expired leases.",
    })
)

func init() {
@@ -67,6 +73,7 @@ func init() {
    prometheus.MustRegister(proposalsApplied)
    prometheus.MustRegister(proposalsPending)
    prometheus.MustRegister(proposalsFailed)
    prometheus.MustRegister(leaseExpired)
}

func monitorFileDescriptor(done <-chan struct{}) {
17
vendor/github.com/coreos/etcd/etcdserver/quota.go
generated
vendored
@@ -16,7 +16,15 @@ package etcdserver

import (
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/mvcc/backend"
)

const (
    // DefaultQuotaBytes is the number of bytes the backend Size may
    // consume before exceeding the space quota.
    DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
    // MaxQuotaBytes is the maximum number of bytes suggested for a backend
    // quota. A larger quota may lead to degraded performance.
    MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
)

// Quota represents an arbitrary quota against arbitrary requests. Each request
@@ -57,11 +65,10 @@ func NewBackendQuota(s *EtcdServer) Quota {
    }
    if s.Cfg.QuotaBackendBytes == 0 {
        // use default size if no quota size given
        return &backendQuota{s, backend.DefaultQuotaBytes}
        return &backendQuota{s, DefaultQuotaBytes}
    }
    if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes {
        plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes)
        return &backendQuota{s, backend.MaxQuotaBytes}
    if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
        plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
    }
    return &backendQuota{s, s.Cfg.QuotaBackendBytes}
}
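The quota constants now live in etcdserver, and an over-recommended quota is merely warned about rather than clamped. Related, newBackend (in backend.go above) maps 10% past a custom quota so an over-quota backend can still be disarmed. A back-of-envelope check of that headroom:

package main

import "fmt"

func main() {
    const DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB, as above
    const MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024)     // 8GB, as above

    quota := int64(4 * 1024 * 1024 * 1024) // a custom 4GB quota
    mmap := quota + quota/10               // the mmap size newBackend would use
    fmt.Println(quota, mmap)               // 4294967296 4724464025
    fmt.Println(quota > DefaultQuotaBytes, quota <= MaxQuotaBytes) // true true
}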
131
vendor/github.com/coreos/etcd/etcdserver/raft.go
generated
vendored
131
vendor/github.com/coreos/etcd/etcdserver/raft.go
generated
vendored
@@ -83,7 +83,8 @@ type RaftTimer interface {
type apply struct {
entries []raftpb.Entry
snapshot raftpb.Snapshot
raftDone <-chan struct{} // rx {} after raft has persisted messages
// notifyc synchronizes etcd server applies with the raft node
notifyc chan struct{}
}

type raftNode struct {
@@ -94,14 +95,7 @@ type raftNode struct {
term uint64
lead uint64

mu sync.Mutex
// last lead elected time
lt time.Time

// to check if msg receiver is removed from cluster
isIDRemoved func(id uint64) bool

raft.Node
raftNodeConfig

// a chan to send/receive snapshot
msgSnapC chan raftpb.Message
@@ -113,28 +107,51 @@ type raftNode struct {
readStateC chan raft.ReadState

// utility
ticker <-chan time.Time
ticker *time.Ticker
// contention detectors for raft heartbeat message
td *contention.TimeoutDetector
heartbeat time.Duration // for logging
raftStorage *raft.MemoryStorage
storage Storage
// transport specifies the transport to send and receive msgs to members.
// Sending messages MUST NOT block. It is okay to drop messages, since
// clients should timeout and reissue their messages.
// If transport is nil, server will panic.
transport rafthttp.Transporter
td *contention.TimeoutDetector

stopped chan struct{}
done chan struct{}
}

type raftNodeConfig struct {
// to check if msg receiver is removed from cluster
isIDRemoved func(id uint64) bool
raft.Node
raftStorage *raft.MemoryStorage
storage Storage
heartbeat time.Duration // for logging
// transport specifies the transport to send and receive msgs to members.
// Sending messages MUST NOT block. It is okay to drop messages, since
// clients should timeout and reissue their messages.
// If transport is nil, server will panic.
transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
r := &raftNode{
raftNodeConfig: cfg,
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
readStateC: make(chan raft.ReadState, 1),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
applyc: make(chan apply),
stopped: make(chan struct{}),
done: make(chan struct{}),
}
if r.heartbeat == 0 {
r.ticker = &time.Ticker{}
} else {
r.ticker = time.NewTicker(r.heartbeat)
}
return r
}
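newRaftNode above swaps the heartbeat channel (previously created with time.Tick, which can never be stopped) for a *time.Ticker that onStop can release, and uses a zero-value Ticker when heartbeat is zero. That works because the zero value's C field is a nil channel, and receiving from a nil channel blocks forever. A self-contained sketch of that property, independent of etcd:

package main

import (
	"fmt"
	"time"
)

func main() {
	idle := &time.Ticker{} // zero value: C is nil, so <-idle.C never fires
	live := time.NewTicker(10 * time.Millisecond)
	defer live.Stop() // stoppable, unlike a channel from time.Tick

	select {
	case <-idle.C: // blocks forever; a nil-channel receive never proceeds
		fmt.Println("unreachable")
	case <-live.C:
		fmt.Println("tick from the real ticker")
	}
}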
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
r.applyc = make(chan apply)
r.stopped = make(chan struct{})
r.done = make(chan struct{})
internalTimeout := time.Second

go func() {
@@ -143,14 +160,12 @@ func (r *raftNode) start(rh *raftReadyHandler) {

for {
select {
case <-r.ticker:
case <-r.ticker.C:
r.Tick()
case rd := <-r.Ready():
if rd.SoftState != nil {
if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead {
r.mu.Lock()
r.lt = time.Now()
r.mu.Unlock()
newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
if newLeader {
leaderChanges.Inc()
}

@@ -162,7 +177,8 @@ func (r *raftNode) start(rh *raftReadyHandler) {

atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
islead = rd.RaftState == raft.StateLeader
rh.updateLeadership()
rh.updateLeadership(newLeader)
r.td.Reset()
}

if len(rd.ReadStates) != 0 {
@@ -175,11 +191,11 @@ func (r *raftNode) start(rh *raftReadyHandler) {
}
}

raftDone := make(chan struct{}, 1)
notifyc := make(chan struct{}, 1)
ap := apply{
entries: rd.CommittedEntries,
snapshot: rd.Snapshot,
raftDone: raftDone,
notifyc: notifyc,
}

updateCommittedIndex(&ap, rh)
@@ -195,7 +211,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// For more details, check raft thesis 10.2.1
if islead {
// gofail: var raftBeforeLeaderSend struct{}
r.sendMessages(rd.Messages)
r.transport.Send(r.processMessages(rd.Messages))
}

// gofail: var raftBeforeSave struct{}
@@ -212,6 +228,9 @@ func (r *raftNode) start(rh *raftReadyHandler) {
if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
plog.Fatalf("raft save snapshot error: %v", err)
}
// etcdserver now claims the snapshot has been persisted onto the disk
notifyc <- struct{}{}

// gofail: var raftAfterSaveSnap struct{}
r.raftStorage.ApplySnapshot(rd.Snapshot)
plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
@@ -221,10 +240,44 @@ func (r *raftNode) start(rh *raftReadyHandler) {
r.raftStorage.Append(rd.Entries)

if !islead {
// finish processing incoming messages before we signal raftdone chan
msgs := r.processMessages(rd.Messages)

// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
notifyc <- struct{}{}

// Candidate or follower needs to wait for all pending configuration
// changes to be applied before sending messages.
// Otherwise we might incorrectly count votes (e.g. votes from removed members).
// Also slow machine's follower raft-layer could proceed to become the leader
// on its own single-node cluster, before apply-layer applies the config change.
// We simply wait for ALL pending entries to be applied for now.
// We might improve this later on if it causes unnecessary long blocking issues.
waitApply := false
for _, ent := range rd.CommittedEntries {
if ent.Type == raftpb.EntryConfChange {
waitApply = true
break
}
}
if waitApply {
// blocks until 'applyAll' calls 'applyWait.Trigger'
// to be in sync with scheduled config-change job
// (assume notifyc has cap of 1)
select {
case notifyc <- struct{}{}:
case <-r.stopped:
return
}
}

// gofail: var raftBeforeFollowerSend struct{}
r.sendMessages(rd.Messages)
r.transport.Send(msgs)
} else {
// leader already processed 'MsgSnap' and signaled
notifyc <- struct{}{}
}
raftDone <- struct{}{}

r.Advance()
case <-r.stopped:
return
@@ -246,7 +299,7 @@ func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
}
}

func (r *raftNode) sendMessages(ms []raftpb.Message) {
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
sentAppResp := false
for i := len(ms) - 1; i >= 0; i-- {
if r.isIDRemoved(ms[i].To) {
@@ -282,20 +335,13 @@ func (r *raftNode) sendMessages(ms []raftpb.Message) {
}
}
}

r.transport.Send(ms)
return ms
}

func (r *raftNode) apply() chan apply {
return r.applyc
}

func (r *raftNode) leadElectedTime() time.Time {
r.mu.Lock()
defer r.mu.Unlock()
return r.lt
}

func (r *raftNode) stop() {
r.stopped <- struct{}{}
<-r.done
@@ -303,6 +349,7 @@ func (r *raftNode) stop() {

func (r *raftNode) onStop() {
r.Stop()
r.ticker.Stop()
r.transport.Stop()
if err := r.storage.Close(); err != nil {
plog.Panicf("raft close storage error: %v", err)
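Taken together, the raft.go changes replace the fire-and-forget raftDone channel with notifyc, a capacity-1 channel used as a handshake: the raft goroutine signals once entries are on disk, and for config changes the send doubles as a barrier until applyAll drains it. A stripped-down sketch of the persisted-before-apply half of that handshake (the goroutines and names here are illustrative, not etcd's):

package main

import (
	"fmt"
	"time"
)

func main() {
	notifyc := make(chan struct{}, 1) // cap 1: one signal per Ready batch

	// raft side: signal after the (simulated) disk write completes.
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for fsyncing the log
		notifyc <- struct{}{}             // buffered, so it does not block
	}()

	// apply side: wait for persistence before applying, mirroring
	// applyAll's "<-apply.notifyc" before it may trigger a snapshot.
	<-notifyc
	fmt.Println("entries persisted; safe to apply and snapshot")
}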
234
vendor/github.com/coreos/etcd/etcdserver/server.go
generated
vendored
@@ -23,7 +23,6 @@ import (
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"sync"
"sync/atomic"
@@ -41,7 +40,6 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/contention"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/idutil"
"github.com/coreos/etcd/pkg/pbutil"
@@ -62,7 +60,7 @@ import (
)

const (
DefaultSnapCount = 10000
DefaultSnapCount = 100000

StoreClusterPrefix = "/0"
StoreKeysPrefix = "/1"
@@ -77,7 +75,6 @@ const (
// (since it will timeout).
monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second

databaseFilename = "db"
// max number of in-flight snapshot messages etcdserver allows to have
// This number is more than enough for most clusters with 5 machines.
maxInFlightMsgSnap = 16
@@ -85,7 +82,8 @@ const (
releaseDelayAfterSnapshot = 30 * time.Second

// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
maxPendingRevokes = 16
maxPendingRevokes = 16
recommendedMaxRequestBytes = 10 * 1024 * 1024
)

var (
@@ -135,15 +133,15 @@ type Server interface {
// AddMember attempts to add a member into the cluster. It will return
// ErrIDRemoved if member ID is removed from the cluster, or return
// ErrIDExists if member ID exists in the cluster.
AddMember(ctx context.Context, memb membership.Member) error
AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
// RemoveMember attempts to remove a member from the cluster. It will
// return ErrIDRemoved if member ID is removed from the cluster, or return
// ErrIDNotFound if member ID is not in the cluster.
RemoveMember(ctx context.Context, id uint64) error
RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)

// UpdateMember attempts to update an existing member in the cluster. It will
// return ErrIDNotFound if the member ID does not exist.
UpdateMember(ctx context.Context, updateMemb membership.Member) error
UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)

// ClusterVersion is the cluster-wide minimum major.minor version.
// Cluster version is set to the min version that an etcd member is
@@ -201,7 +199,8 @@ type EtcdServer struct {

cluster *membership.RaftCluster

store store.Store
store store.Store
snapshotter *snap.Snapshotter

applyV2 ApplierV2

@@ -221,7 +220,7 @@ type EtcdServer struct {
stats *stats.ServerStats
lstats *stats.LeaderStats

SyncTicker <-chan time.Time
SyncTicker *time.Ticker
// compactor is used to auto-compact the KV.
compactor *compactor.Periodic

@@ -238,6 +237,14 @@ type EtcdServer struct {
// wg is used to wait for the go routines that depends on the server state
// to exit when stopping the server.
wg sync.WaitGroup

// ctx is used for etcd-initiated requests that may need to be canceled
// on etcd server shutdown.
ctx context.Context
cancel context.CancelFunc

leadTimeMu sync.RWMutex
leadElectedTime time.Time
}

// NewServer creates a new EtcdServer from the supplied configuration. The
@@ -253,6 +260,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
cl *membership.RaftCluster
)

if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
}

if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
@@ -264,23 +275,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
}
ss := snap.New(cfg.SnapDir())

bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
bepath := cfg.backendPath()
beExist := fileutil.Exist(bepath)

var be backend.Backend
beOpened := make(chan struct{})
go func() {
be = backend.NewDefaultBackend(bepath)
beOpened <- struct{}{}
}()

select {
case <-beOpened:
case <-time.After(time.Second):
plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
plog.Warningf("waiting for it to exit before starting...")
<-beOpened
}
be := openBackend(cfg)
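For context on the lines just removed: the old inline open ran backend.NewDefaultBackend in a goroutine and warned after a second if the bolt file lock was still held by another process, then kept waiting; v3.2 folds this into the new openBackend helper in backend.go. The open-with-timeout-warning shape in isolation (openSlow and its callback are hypothetical, not etcd APIs):

package main

import (
	"fmt"
	"time"
)

// openSlow starts a possibly lock-blocked open in a goroutine, warns once
// after a second, then keeps waiting for the open to finish.
func openSlow(open func() string) string {
	done := make(chan string, 1)
	go func() { done <- open() }()
	select {
	case v := <-done:
		return v
	case <-time.After(time.Second):
		fmt.Println("warning: still waiting for the file lock held by another process")
		return <-done
	}
}

func main() {
	v := openSlow(func() string {
		time.Sleep(1500 * time.Millisecond) // simulate a held flock
		return "backend"
	})
	fmt.Println("opened:", v)
}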
defer func() {
if err != nil {
@@ -378,6 +375,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
plog.Panicf("recovered store from snapshot error: %v", err)
}
plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
plog.Panicf("recovering backend from snapshot error: %v", err)
}
}
cfg.Print()
if !cfg.ForceNewCluster {
@@ -400,39 +400,32 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}

sstats := &stats.ServerStats{
Name: cfg.Name,
ID: id.String(),
}
sstats.Initialize()
sstats := stats.NewServerStats(cfg.Name, id.String())
lstats := stats.NewLeaderStats(id.String())

heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{
readych: make(chan struct{}),
Cfg: cfg,
snapCount: cfg.SnapCount,
errorc: make(chan error, 1),
store: st,
r: raftNode{
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
ticker: time.Tick(heartbeat),
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * heartbeat),
heartbeat: heartbeat,
raftStorage: s,
storage: NewStorage(w, ss),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
readStateC: make(chan raft.ReadState, 1),
},
readych: make(chan struct{}),
Cfg: cfg,
snapCount: cfg.SnapCount,
errorc: make(chan error, 1),
store: st,
snapshotter: ss,
r: *newRaftNode(
raftNodeConfig{
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: heartbeat,
raftStorage: s,
storage: NewStorage(w, ss),
},
),
id: id,
attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
cluster: cl,
stats: sstats,
lstats: lstats,
SyncTicker: time.Tick(500 * time.Millisecond),
SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
@@ -458,12 +451,26 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
}
}
srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
newSrv := srv // since srv == nil in defer if srv is returned as nil
defer func() {
// closing backend without first closing kv can cause
// resumed compactions to fail with closed tx errors
if err != nil {
newSrv.kv.Close()
}
}()

srv.authStore = auth.NewAuthStore(srv.be,
srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
tp, err := auth.NewTokenProvider(cfg.AuthToken,
func(index uint64) <-chan struct{} {
return srv.applyWait.Wait(index)
})
},
)
if err != nil {
plog.Errorf("failed to create token provider: %s", err)
return nil, err
}
srv.authStore = auth.NewAuthStore(srv.be, tp)
if h := cfg.AutoCompactionRetention; h != 0 {
srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
srv.compactor.Run()
@@ -531,6 +538,7 @@ func (s *EtcdServer) start() {
s.done = make(chan struct{})
s.stop = make(chan struct{})
s.stopping = make(chan struct{})
s.ctx, s.cancel = context.WithCancel(context.Background())
s.readwaitc = make(chan struct{}, 1)
s.readNotifier = newNotifier()
if s.ClusterVersion() != nil {
@@ -603,16 +611,19 @@ type etcdProgress struct {
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
updateLeadership func()
updateLeadership func(newLeader bool)
updateCommittedIndex func(uint64)
}

func (s *EtcdServer) run() {
snap, err := s.r.raftStorage.Snapshot()
sn, err := s.r.raftStorage.Snapshot()
if err != nil {
plog.Panicf("get snapshot from raft storage error: %v", err)
}

// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler()

var (
smu sync.RWMutex
syncC <-chan time.Time
@@ -629,7 +640,7 @@ func (s *EtcdServer) run() {
return
}
rh := &raftReadyHandler{
updateLeadership: func() {
updateLeadership: func(newLeader bool) {
if !s.isLeader() {
if s.lessor != nil {
s.lessor.Demote()
@@ -639,7 +650,13 @@ func (s *EtcdServer) run() {
}
setSyncC(nil)
} else {
setSyncC(s.SyncTicker)
if newLeader {
t := time.Now()
s.leadTimeMu.Lock()
s.leadElectedTime = t
s.leadTimeMu.Unlock()
}
setSyncC(s.SyncTicker.C)
if s.compactor != nil {
s.compactor.Resume()
}
@@ -650,9 +667,6 @@ func (s *EtcdServer) run() {
if s.stats != nil {
s.stats.BecomeLeader()
}
if s.r.td != nil {
s.r.td.Reset()
}
},
updateCommittedIndex: func(ci uint64) {
cci := s.getCommittedIndex()
@@ -663,25 +677,26 @@ func (s *EtcdServer) run() {
}
s.r.start(rh)

// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler()
ep := etcdProgress{
confState: snap.Metadata.ConfState,
snapi: snap.Metadata.Index,
appliedt: snap.Metadata.Term,
appliedi: snap.Metadata.Index,
confState: sn.Metadata.ConfState,
snapi: sn.Metadata.Index,
appliedt: sn.Metadata.Term,
appliedi: sn.Metadata.Index,
}

defer func() {
s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
close(s.stopping)
s.wgMu.Unlock()
s.cancel()

sched.Stop()

// wait for goroutines before closing raft so wal stays open
s.wg.Wait()

s.SyncTicker.Stop()

// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
// by adding a peer after raft stops the transport
s.r.stop()
@@ -728,7 +743,8 @@ func (s *EtcdServer) run() {
}
lid := lease.ID
s.goAttach(func() {
s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)})
s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
leaseExpired.Inc()
<-c
})
}
@@ -762,7 +778,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
// wait for the raft routine to finish the disk writes before triggering a
// snapshot. or applied index might be greater than the last index in raft
// storage, since the raft routine might be slower than apply routine.
<-apply.raftDone
<-apply.notifyc

s.triggerSnapshot(ep)
select {
@@ -787,23 +803,19 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
apply.snapshot.Metadata.Index, ep.appliedi)
}

snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index)
// wait for raftNode to persist snapshot onto the disk
<-apply.notifyc

newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
if err != nil {
plog.Panicf("get database snapshot file path error: %v", err)
plog.Panic(err)
}

fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
if err := os.Rename(snapfn, fn); err != nil {
plog.Panicf("rename snapshot file error: %v", err)
}

newbe := backend.NewDefaultBackend(fn)

// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
if s.lessor != nil {
plog.Info("recovering lessor...")
s.lessor.Recover(newbe, s.kv)
s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
plog.Info("finished recovering lessor")
}

@@ -955,7 +967,7 @@ func (s *EtcdServer) TransferLeadership() error {
}

tm := s.Cfg.ReqTimeout()
ctx, cancel := context.WithTimeout(context.TODO(), tm)
ctx, cancel := context.WithTimeout(s.ctx, tm)
err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
cancel()
return err
@@ -1015,7 +1027,7 @@ func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
if s.authStore == nil {
// In the context of ordinal etcd process, s.authStore will never be nil.
// In the context of ordinary etcd process, s.authStore will never be nil.
// This branch is for handling cases in server_test.go
return nil
}
@@ -1026,7 +1038,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err
// in the state machine layer
// However, both of membership change and role management requires the root privilege.
// So careful operation by admins can prevent the problem.
authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
authInfo, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return err
}
@@ -1034,27 +1046,27 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err
return s.AuthStore().IsAdminPermitted(authInfo)
}

func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error {
func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
return nil, err
}

if s.Cfg.StrictReconfigCheck {
// by default StrictReconfigCheck is enabled; reject new members if unhealthy
if !s.cluster.IsReadyToAddNewMember() {
plog.Warningf("not enough started members, rejecting member add %+v", memb)
return ErrNotEnoughStartedMembers
return nil, ErrNotEnoughStartedMembers
}
if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
return ErrUnhealthy
return nil, ErrUnhealthy
}
}

// TODO: move Member to protobuf type
b, err := json.Marshal(memb)
if err != nil {
return err
return nil, err
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeAddNode,
@@ -1064,14 +1076,14 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro
return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
return nil, err
}

// by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
if err := s.mayRemoveMember(types.ID(id)); err != nil {
return err
return nil, err
}

cc := raftpb.ConfChange{
@@ -1107,14 +1119,14 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error {
return nil
}

func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error {
func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
b, merr := json.Marshal(memb)
if merr != nil {
return merr
return nil, merr
}

if err := s.checkMembershipOperationPermission(ctx); err != nil {
return err
return nil, err
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeUpdateNode,
@@ -1137,31 +1149,34 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }

type confChangeResponse struct {
membs []*membership.Member
err error
}

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
cc.ID = s.reqIDGen.Next()
ch := s.w.Register(cc.ID)
start := time.Now()
if err := s.r.ProposeConfChange(ctx, cc); err != nil {
s.w.Trigger(cc.ID, nil)
return err
return nil, err
}
select {
case x := <-ch:
if err, ok := x.(error); ok {
return err
if x == nil {
plog.Panicf("configure trigger value should never be nil")
}
if x != nil {
plog.Panicf("return type should always be error")
}
return nil
resp := x.(*confChangeResponse)
return resp.membs, resp.err
case <-ctx.Done():
s.w.Trigger(cc.ID, nil) // GC wait
return s.parseProposeCtxErr(ctx.Err(), start)
return nil, s.parseProposeCtxErr(ctx.Err(), start)
case <-s.stopping:
return ErrStopped
return nil, ErrStopped
}
}
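configure now resolves through the wait registry to a *confChangeResponse carrying both the post-change member list and any error, pushed by the apply loop's s.w.Trigger call shown further down. A compact, single-use sketch of that register/trigger rendezvous (etcd's real implementation is pkg/wait; this stand-in is not concurrency-safe beyond the one goroutine pair):

package main

import "fmt"

type confChangeResponse struct {
	membs []string
	err   error
}

// wait pairs a request ID with the channel its proposer blocks on.
type wait struct{ m map[uint64]chan interface{} }

func (w *wait) Register(id uint64) <-chan interface{} {
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

func (w *wait) Trigger(id uint64, v interface{}) { w.m[id] <- v }

func main() {
	w := &wait{m: make(map[uint64]chan interface{})}
	ch := w.Register(42) // proposer registers before proposing

	// apply loop: the conf change committed, publish members and error together.
	go w.Trigger(42, &confChangeResponse{membs: []string{"infra0", "infra1"}})

	resp := (<-ch).(*confChangeResponse)
	fmt.Println(resp.membs, resp.err)
}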
@@ -1169,7 +1184,6 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error
// This makes no guarantee that the request will be proposed or performed.
// The request will be canceled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
req := pb.Request{
Method: "SYNC",
ID: s.reqIDGen.Next(),
@@ -1178,6 +1192,7 @@ func (s *EtcdServer) sync(timeout time.Duration) {
data := pbutil.MustMarshal(&req)
// There is no promise that the node has a leader when doing a SYNC request,
// so it uses a goroutine to propose.
ctx, cancel := context.WithTimeout(s.ctx, timeout)
s.goAttach(func() {
s.r.Propose(ctx, data)
cancel()
@@ -1202,7 +1217,7 @@ func (s *EtcdServer) publish(timeout time.Duration) {
}

for {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
ctx, cancel := context.WithTimeout(s.ctx, timeout)
_, err := s.Do(ctx, req)
cancel()
switch err {
@@ -1262,7 +1277,7 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl
removedSelf, err := s.applyConfChange(cc, confState)
s.setAppliedIndex(e.Index)
shouldStop = shouldStop || removedSelf
s.w.Trigger(cc.ID, err)
s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
default:
plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
}
@@ -1347,8 +1362,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
Action: pb.AlarmRequest_ACTIVATE,
Alarm: pb.AlarmType_NOSPACE,
}
r := pb.InternalRaftRequest{Alarm: a}
s.processInternalRaftRequest(context.TODO(), r)
s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
s.w.Trigger(id, ar)
})
}
@@ -1544,7 +1558,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) {
Path: membership.StoreClusterVersionKey(),
Val: ver,
}
ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
_, err := s.Do(ctx, req)
cancel()
switch err {
@@ -1563,7 +1577,9 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
case context.Canceled:
return ErrCanceled
case context.DeadlineExceeded:
curLeadElected := s.r.leadElectedTime()
s.leadTimeMu.RLock()
curLeadElected := s.leadElectedTime
s.leadTimeMu.RUnlock()
prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
if start.After(prevLeadLost) && start.Before(curLeadElected) {
return ErrTimeoutDueToLeaderFail
7
vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
generated
vendored
@@ -60,9 +60,14 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
n, err := snapshot.WriteTo(pw)
if err == nil {
plog.Infof("wrote database snapshot out [total bytes: %d]", n)
} else {
plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
}
pw.CloseWithError(err)
snapshot.Close()
err = snapshot.Close()
if err != nil {
plog.Panicf("failed to close database snapshot: %v", err)
}
}()
return pr
}
15
vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
generated
vendored
@@ -24,25 +24,30 @@ import (
// LeaderStats is used by the leader in an etcd cluster, and encapsulates
// statistics about communication with its followers
type LeaderStats struct {
leaderStats
sync.Mutex
}

type leaderStats struct {
// Leader is the ID of the leader in the etcd cluster.
// TODO(jonboulle): clarify that these are IDs, not names
Leader string `json:"leader"`
Followers map[string]*FollowerStats `json:"followers"`

sync.Mutex
}

// NewLeaderStats generates a new LeaderStats with the given id as leader
func NewLeaderStats(id string) *LeaderStats {
return &LeaderStats{
Leader: id,
Followers: make(map[string]*FollowerStats),
leaderStats: leaderStats{
Leader: id,
Followers: make(map[string]*FollowerStats),
},
}
}

func (ls *LeaderStats) JSON() []byte {
ls.Lock()
stats := *ls
stats := ls.leaderStats
ls.Unlock()
b, err := json.Marshal(stats)
// TODO(jonboulle): appropriate error handling?
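The LeaderStats split above fixes a subtle race: JSON used to copy the whole struct, mutex included, so the copy both raced with concurrent writers and duplicated a lock by value. Moving the marshalable fields into an embedded inner struct lets JSON snapshot just the data while holding the lock. The same pattern in miniature (the counters fields are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type counters struct {
	Hits   int `json:"hits"`
	Misses int `json:"misses"`
}

// Stats embeds the data beside the mutex, so copying the inner struct
// never copies the lock.
type Stats struct {
	counters
	sync.Mutex
}

func (s *Stats) JSON() []byte {
	s.Lock()
	snapshot := s.counters // copies only the data fields, under the lock
	s.Unlock()
	b, _ := json.Marshal(snapshot)
	return b
}

func main() {
	s := &Stats{}
	s.Lock()
	s.Hits++
	s.Unlock()
	fmt.Println(string(s.JSON()))
}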
54
vendor/github.com/coreos/etcd/etcdserver/stats/server.go
generated
vendored
@@ -26,6 +26,26 @@ import (
// ServerStats encapsulates various statistics about an EtcdServer and its
// communication with other members of the cluster
type ServerStats struct {
serverStats
sync.Mutex
}

func NewServerStats(name, id string) *ServerStats {
ss := &ServerStats{
serverStats: serverStats{
Name: name,
ID: id,
},
}
now := time.Now()
ss.StartTime = now
ss.LeaderInfo.StartTime = now
ss.sendRateQueue = &statsQueue{back: -1}
ss.recvRateQueue = &statsQueue{back: -1}
return ss
}

type serverStats struct {
Name string `json:"name"`
// ID is the raft ID of the node.
// TODO(jonboulle): use ID instead of name?
@@ -49,17 +69,15 @@ type ServerStats struct {

sendRateQueue *statsQueue
recvRateQueue *statsQueue

sync.Mutex
}

func (ss *ServerStats) JSON() []byte {
ss.Lock()
stats := *ss
stats := ss.serverStats
ss.Unlock()
stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates()
stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates()
stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
b, err := json.Marshal(stats)
// TODO(jonboulle): appropriate error handling?
if err != nil {
@@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte {
return b
}

// Initialize clears the statistics of ServerStats and resets its start time
func (ss *ServerStats) Initialize() {
if ss == nil {
return
}
now := time.Now()
ss.StartTime = now
ss.LeaderInfo.StartTime = now
ss.sendRateQueue = &statsQueue{
back: -1,
}
ss.recvRateQueue = &statsQueue{
back: -1,
}
}

// RecvRates calculates and returns the rate of received append requests
func (ss *ServerStats) RecvRates() (float64, float64) {
return ss.recvRateQueue.Rate()
}

// SendRates calculates and returns the rate of sent append requests
func (ss *ServerStats) SendRates() (float64, float64) {
return ss.sendRateQueue.Rate()
}

// RecvAppendReq updates the ServerStats in response to an AppendRequest
// from the given leader being received
func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
3
vendor/github.com/coreos/etcd/etcdserver/storage.go
generated
vendored
@@ -32,9 +32,6 @@ type Storage interface {
Save(st raftpb.HardState, ents []raftpb.Entry) error
// SaveSnap function saves snapshot to the underlying stable storage.
SaveSnap(snap raftpb.Snapshot) error
// DBFilePath returns the file path of database snapshot saved with given
// id.
DBFilePath(id uint64) (string, error)
// Close closes the Storage and performs finalization.
Close() error
}
2
vendor/github.com/coreos/etcd/etcdserver/util.go
generated
vendored
@@ -87,7 +87,7 @@ type notifier struct {

func newNotifier() *notifier {
return &notifier{
c: make(chan struct{}, 0),
c: make(chan struct{}),
}
}
292
vendor/github.com/coreos/etcd/etcdserver/v3_server.go
generated
vendored
@@ -19,6 +19,8 @@ import (
"encoding/binary"
"time"

"github.com/gogo/protobuf/proto"

"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
@@ -27,17 +29,10 @@ import (
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/raft"

"github.com/coreos/go-semver/semver"
"golang.org/x/net/context"
)

const (
// the max request size that raft accepts.
// TODO: make this a flag? But we probably do not want to
// accept a large request which might block the raft stream. A user
// specifying a large value might end up shooting themselves in the foot.
maxRequestBytes = 1.5 * 1024 * 1024

// In the healthy case, there might be a small gap (tens of entries) between
// the applied index and committed index.
// However, if the committed entries are very heavy to apply, the gap might grow.
@@ -45,10 +40,6 @@ const (
maxGapBetweenApplyAndCommitIndex = 5000
)

var (
newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0"))
)

type RaftKV interface {
Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
@@ -91,11 +82,6 @@ type Authenticator interface {
}

func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
// TODO: remove this checking when we release etcd 3.2
if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
return s.legacyRange(ctx, r)
}

if !r.Serializable {
err := s.linearizableReadNotify(ctx)
if err != nil {
@@ -107,65 +93,30 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
chk := func(ai *auth.AuthInfo) error {
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
get := func() { resp, err = s.applyV3Base.Range(nil, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}

// TODO: remove this func when we release etcd 3.2
func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if r.Serializable {
var resp *pb.RangeResponse
var err error
chk := func(ai *auth.AuthInfo) error {
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
get := func() { resp, err = s.applyV3Base.Range(noTxn, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.RangeResponse), nil
}

func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.PutResponse), nil
return resp.(*pb.PutResponse), nil
}

func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.DeleteRangeResponse), nil
return resp.(*pb.DeleteRangeResponse), nil
}

func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
// TODO: remove this checking when we release etcd 3.2
if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) {
return s.legacyTxn(ctx, r)
}

if isTxnReadonly(r) {
if !isTxnSerializable(r) {
err := s.linearizableReadNotify(ctx)
@@ -184,38 +135,11 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
}
return resp, err
}
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.TxnResponse), nil
}

// TODO: remove this func when we release etcd 3.2
func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
if isTxnSerializable(r) {
var resp *pb.TxnResponse
var err error
chk := func(ai *auth.AuthInfo) error {
return checkTxnAuth(s.authStore, ai, r)
}
get := func() { resp, err = s.applyV3Base.Txn(r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.TxnResponse), nil
return resp.(*pb.TxnResponse), nil
}

func isTxnSerializable(r *pb.TxnRequest) bool {
@@ -280,25 +204,19 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
// only use positive int64 id's
r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
}
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.LeaseGrantResponse), nil
return resp.(*pb.LeaseGrantResponse), nil
}

func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.LeaseRevokeResponse), nil
return resp.(*pb.LeaseRevokeResponse), nil
}

func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
@@ -394,54 +312,45 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
}

func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AlarmResponse), nil
return resp.(*pb.AlarmResponse), nil
}

func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthEnableResponse), nil
return resp.(*pb.AuthEnableResponse), nil
}

func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthDisableResponse), nil
return resp.(*pb.AuthDisableResponse), nil
}

func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
var result *applyResult

err := s.linearizableReadNotify(ctx)
if err != nil {
if err := s.linearizableReadNotify(ctx); err != nil {
return nil, err
}

var resp proto.Message
for {
checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
if err != nil {
plog.Errorf("invalid authentication request to user %s was issued", r.Name)
if err != auth.ErrAuthNotEnabled {
plog.Errorf("invalid authentication request to user %s was issued", r.Name)
}
return nil, err
}

st, err := s.AuthStore().GenSimpleToken()
st, err := s.AuthStore().GenTokenPrefix()
if err != nil {
return nil, err
}
@@ -452,172 +361,147 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
SimpleToken: st,
}

result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
if checkedRevision == s.AuthStore().Revision() {
break
}

if checkedRevision != s.AuthStore().Revision() {
plog.Infof("revision when password checked is obsolete, retrying")
continue
}

break
plog.Infof("revision when password checked is obsolete, retrying")
}

return result.resp.(*pb.AuthenticateResponse), nil
return resp.(*pb.AuthenticateResponse), nil
}

func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserAddResponse), nil
return resp.(*pb.AuthUserAddResponse), nil
}

func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserDeleteResponse), nil
return resp.(*pb.AuthUserDeleteResponse), nil
}

func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserChangePasswordResponse), nil
return resp.(*pb.AuthUserChangePasswordResponse), nil
}

func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserGrantRoleResponse), nil
return resp.(*pb.AuthUserGrantRoleResponse), nil
}

func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserGetResponse), nil
return resp.(*pb.AuthUserGetResponse), nil
}

func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserListResponse), nil
return resp.(*pb.AuthUserListResponse), nil
}

func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthUserRevokeRoleResponse), nil
return resp.(*pb.AuthUserRevokeRoleResponse), nil
}

func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleAddResponse), nil
return resp.(*pb.AuthRoleAddResponse), nil
}

func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil
return resp.(*pb.AuthRoleGrantPermissionResponse), nil
}

func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleGetResponse), nil
return resp.(*pb.AuthRoleGetResponse), nil
}

func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleListResponse), nil
return resp.(*pb.AuthRoleListResponse), nil
}

func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil
return resp.(*pb.AuthRoleRevokePermissionResponse), nil
}

func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleDeleteResponse), nil
}

func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
result, err := s.processInternalRaftRequestOnce(ctx, r)
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
return result.resp.(*pb.AuthRoleDeleteResponse), nil
return result.resp, nil
}

func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
for {
resp, err := s.raftRequestOnce(ctx, r)
if err != auth.ErrAuthOldRevision {
return resp, err
}
}
}

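raftRequest above retries only on auth.ErrAuthOldRevision, the case where a proposal was validated against an auth-store revision that advanced before the entry applied; any other outcome, success or failure, returns immediately. The retry shape extracted on its own (errStale stands in for auth.ErrAuthOldRevision):

package main

import (
	"errors"
	"fmt"
)

var errStale = errors.New("auth revision is stale") // stand-in error

// request keeps retrying while the single retryable error recurs; every
// other error (or nil) is final, matching raftRequest's loop.
func request(once func() (string, error)) (string, error) {
	for {
		resp, err := once()
		if !errors.Is(err, errStale) {
			return resp, err
		}
	}
}

func main() {
	calls := 0
	resp, err := request(func() (string, error) {
		calls++
		if calls < 3 {
			return "", errStale // pretend the revision moved twice
		}
		return "ok", nil
	})
	fmt.Println(resp, err, "after", calls, "attempts")
}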
// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
for {
ai, err := s.AuthStore().AuthInfoFromCtx(ctx)
ai, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return err
}
@@ -652,7 +536,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
ID: s.reqIDGen.Next(),
}

authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
authInfo, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return nil, err
}
@@ -666,7 +550,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
return nil, err
}

if len(data) > maxRequestBytes {
if len(data) > int(s.Cfg.MaxRequestBytes) {
return nil, ErrRequestTooLarge
}

@@ -696,19 +580,6 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
}
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
var result *applyResult
var err error
for {
result, err = s.processInternalRaftRequestOnce(ctx, r)
if err != auth.ErrAuthOldRevision {
break
}
}

return result, err
}

// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }

@@ -802,3 +673,14 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
return ErrStopped
}
}

func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
if s.Cfg.ClientCertAuthEnabled {
authInfo := s.AuthStore().AuthInfoFromTLS(ctx)
if authInfo != nil {
return authInfo, nil
}
}

return s.AuthStore().AuthInfoFromCtx(ctx)
}
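The new AuthInfoFromCtx wrapper gives the client TLS certificate identity precedence over token-derived auth info when cert auth is enabled. The precedence logic in isolation (fromCtx, fromTLS, and fromToken are placeholders, not etcd APIs):

package main

import "fmt"

type authInfo struct{ user string }

// fromCtx prefers the certificate identity when cert auth is enabled and
// one is present, otherwise falls back to the token path.
func fromCtx(certAuth bool, fromTLS, fromToken func() *authInfo) *authInfo {
	if certAuth {
		if ai := fromTLS(); ai != nil {
			return ai
		}
	}
	return fromToken()
}

func main() {
	tls := func() *authInfo { return &authInfo{user: "cert-user"} }
	tok := func() *authInfo { return &authInfo{user: "token-user"} }
	fmt.Println(fromCtx(true, tls, tok).user)  // cert identity wins
	fmt.Println(fromCtx(false, tls, tok).user) // token path
}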