Update godeps for etcd 3.0.4

Author: Timothy St. Clair
Date: 2016-07-22 13:54:40 -05:00
Commit: 5f008faa8b (parent: 456c43c22d)

457 changed files with 25492 additions and 10481 deletions

Godeps/Godeps.json (generated)

@ -258,8 +258,8 @@
}, },
{ {
"ImportPath": "github.com/boltdb/bolt", "ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.1.0-65-gee4a088", "Comment": "v1.2.1",
"Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873" "Rev": "dfb21201d9270c1082d5fb0f07f500311ff72f18"
}, },
{ {
"ImportPath": "github.com/cloudflare/cfssl/auth", "ImportPath": "github.com/cloudflare/cfssl/auth",
@ -333,263 +333,263 @@
}, },
{ {
"ImportPath": "github.com/coreos/etcd/alarm", "ImportPath": "github.com/coreos/etcd/alarm",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/auth", "ImportPath": "github.com/coreos/etcd/auth",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/auth/authpb", "ImportPath": "github.com/coreos/etcd/auth/authpb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/client", "ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/clientv3", "ImportPath": "github.com/coreos/etcd/clientv3",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/compactor", "ImportPath": "github.com/coreos/etcd/compactor",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/discovery", "ImportPath": "github.com/coreos/etcd/discovery",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/error", "ImportPath": "github.com/coreos/etcd/error",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver", "ImportPath": "github.com/coreos/etcd/etcdserver",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/api", "ImportPath": "github.com/coreos/etcd/etcdserver/api",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/auth", "ImportPath": "github.com/coreos/etcd/etcdserver/auth",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/membership", "ImportPath": "github.com/coreos/etcd/etcdserver/membership",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/etcdserver/stats", "ImportPath": "github.com/coreos/etcd/etcdserver/stats",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/integration", "ImportPath": "github.com/coreos/etcd/integration",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/lease", "ImportPath": "github.com/coreos/etcd/lease",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/lease/leasehttp", "ImportPath": "github.com/coreos/etcd/lease/leasehttp",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/lease/leasepb", "ImportPath": "github.com/coreos/etcd/lease/leasepb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc/backend",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/mvcc/mvccpb",
"Comment": "v3.0.4",
"Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/adt", "ImportPath": "github.com/coreos/etcd/pkg/adt",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/contention", "ImportPath": "github.com/coreos/etcd/pkg/contention",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/crc", "ImportPath": "github.com/coreos/etcd/pkg/crc",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/fileutil", "ImportPath": "github.com/coreos/etcd/pkg/fileutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/httputil", "ImportPath": "github.com/coreos/etcd/pkg/httputil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/idutil", "ImportPath": "github.com/coreos/etcd/pkg/idutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/ioutil", "ImportPath": "github.com/coreos/etcd/pkg/ioutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/logutil", "ImportPath": "github.com/coreos/etcd/pkg/logutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/netutil", "ImportPath": "github.com/coreos/etcd/pkg/netutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/pathutil", "ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/pbutil", "ImportPath": "github.com/coreos/etcd/pkg/pbutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/runtime", "ImportPath": "github.com/coreos/etcd/pkg/runtime",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/schedule", "ImportPath": "github.com/coreos/etcd/pkg/schedule",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/testutil", "ImportPath": "github.com/coreos/etcd/pkg/testutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil", "ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/transport", "ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/types", "ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/pkg/wait", "ImportPath": "github.com/coreos/etcd/pkg/wait",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/raft", "ImportPath": "github.com/coreos/etcd/raft",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/raft/raftpb", "ImportPath": "github.com/coreos/etcd/raft/raftpb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/rafthttp", "ImportPath": "github.com/coreos/etcd/rafthttp",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/snap", "ImportPath": "github.com/coreos/etcd/snap",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/snap/snappb", "ImportPath": "github.com/coreos/etcd/snap/snappb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
},
{
"ImportPath": "github.com/coreos/etcd/storage",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
},
{
"ImportPath": "github.com/coreos/etcd/storage/backend",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
},
{
"ImportPath": "github.com/coreos/etcd/storage/storagepb",
"Comment": "v2.3.0-282-g8b320e7",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/store", "ImportPath": "github.com/coreos/etcd/store",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/version", "ImportPath": "github.com/coreos/etcd/version",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/wal", "ImportPath": "github.com/coreos/etcd/wal",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/etcd/wal/walpb", "ImportPath": "github.com/coreos/etcd/wal/walpb",
"Comment": "v2.3.0-282-g8b320e7", "Comment": "v3.0.4",
"Rev": "8b320e7c550067b1dfb37bd1682e8067023e0751" "Rev": "d53923c636e0e4ab7f00cb75681b97a8f11f5a9d"
}, },
{ {
"ImportPath": "github.com/coreos/go-oidc/http", "ImportPath": "github.com/coreos/go-oidc/http",
@ -613,7 +613,7 @@
}, },
{ {
"ImportPath": "github.com/coreos/go-semver/semver", "ImportPath": "github.com/coreos/go-semver/semver",
"Rev": "d043ae190b3202550d026daf009359bb5d761672" "Rev": "568e959cd89871e61434c1143528d9162da89ef2"
}, },
{ {
"ImportPath": "github.com/coreos/go-systemd/activation", "ImportPath": "github.com/coreos/go-systemd/activation",
@ -647,33 +647,33 @@
}, },
{ {
"ImportPath": "github.com/coreos/pkg/capnslog", "ImportPath": "github.com/coreos/pkg/capnslog",
"Comment": "v2", "Comment": "v2-8-gfa29b1d",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041" "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
}, },
{ {
"ImportPath": "github.com/coreos/pkg/dlopen", "ImportPath": "github.com/coreos/pkg/dlopen",
"Comment": "v2", "Comment": "v2-8-gfa29b1d",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041" "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
}, },
{ {
"ImportPath": "github.com/coreos/pkg/health", "ImportPath": "github.com/coreos/pkg/health",
"Comment": "v2", "Comment": "v2-8-gfa29b1d",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041" "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
}, },
{ {
"ImportPath": "github.com/coreos/pkg/httputil", "ImportPath": "github.com/coreos/pkg/httputil",
"Comment": "v2", "Comment": "v2-8-gfa29b1d",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041" "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
}, },
{ {
"ImportPath": "github.com/coreos/pkg/timeutil", "ImportPath": "github.com/coreos/pkg/timeutil",
"Comment": "v2", "Comment": "v2-8-gfa29b1d",
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041" "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
}, },
{ {
"ImportPath": "github.com/coreos/rkt/api/v1alpha", "ImportPath": "github.com/coreos/rkt/api/v1alpha",
"Comment": "v1.6.0", "Comment": "v1.11.0-59-ga83419b",
"Rev": "14437382a98e5ebeb6cafb57cff445370e3f7d56" "Rev": "a83419be28ac626876f94a28b4df2dbc9eac7448"
}, },
{ {
"ImportPath": "github.com/cpuguy83/go-md2man/md2man", "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@ -895,123 +895,128 @@
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/gogoproto", "ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/description", "ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/equal", "ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/face", "ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/gostring", "ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/grpc",
"Comment": "v0.1-125-g82d16f7",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto", "ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/populate", "ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/size", "ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/stringer", "ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/testgen", "ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/union", "ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/proto", "ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.2-33-ge18d7aa",
"Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/sortkeys", "ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/vanity", "ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/vanity/command", "ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.1-125-g82d16f7", "Comment": "v0.2-33-ge18d7aa",
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c" "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173"
}, },
{ {
"ImportPath": "github.com/golang/glog", "ImportPath": "github.com/golang/glog",
@ -1019,19 +1024,23 @@
}, },
{ {
"ImportPath": "github.com/golang/groupcache/lru", "ImportPath": "github.com/golang/groupcache/lru",
"Rev": "604ed5785183e59ae2789449d89e73f3a2a77987" "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433"
}, },
{ {
"ImportPath": "github.com/golang/mock/gomock", "ImportPath": "github.com/golang/mock/gomock",
"Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9" "Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9"
}, },
{
"ImportPath": "github.com/golang/protobuf/jsonpb",
"Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
},
{ {
"ImportPath": "github.com/golang/protobuf/proto", "ImportPath": "github.com/golang/protobuf/proto",
"Rev": "b982704f8bb716bb608144408cff30e15fbde841" "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
}, },
{ {
"ImportPath": "github.com/google/btree", "ImportPath": "github.com/google/btree",
"Rev": "cc6329d4279e3f025a53a83c397d2339b5705c45" "Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
}, },
{ {
"ImportPath": "github.com/google/cadvisor/api", "ImportPath": "github.com/google/cadvisor/api",
@ -1270,6 +1279,21 @@
"ImportPath": "github.com/gorilla/mux", "ImportPath": "github.com/gorilla/mux",
"Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5" "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
}, },
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities",
"Comment": "v1.0.0-8-gf52d055",
"Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1"
},
{ {
"ImportPath": "github.com/hashicorp/go-msgpack/codec", "ImportPath": "github.com/hashicorp/go-msgpack/codec",
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458" "Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
@ -1351,7 +1375,7 @@
}, },
{ {
"ImportPath": "github.com/jonboulle/clockwork", "ImportPath": "github.com/jonboulle/clockwork",
"Rev": "3f831b65b61282ba6bece21b91beea2edc4c887a" "Rev": "72f9bd7c4e0c2a40055ab3d0f09654f730cce982"
}, },
{ {
"ImportPath": "github.com/juju/ratelimit", "ImportPath": "github.com/juju/ratelimit",
@ -1736,8 +1760,8 @@
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/prometheus", "ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.7.0-39-g3b78d7a", "Comment": "0.7.0-52-ge51041b",
"Rev": "3b78d7a77f51ccbc364d4bc170920153022cfd08" "Rev": "e51041b3fa41cece0dca035740ba6411905be473"
}, },
{ {
"ImportPath": "github.com/prometheus/client_model/go", "ImportPath": "github.com/prometheus/client_model/go",
@ -1746,19 +1770,15 @@
}, },
{ {
"ImportPath": "github.com/prometheus/common/expfmt", "ImportPath": "github.com/prometheus/common/expfmt",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d" "Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
},
{
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
}, },
{ {
"ImportPath": "github.com/prometheus/common/model", "ImportPath": "github.com/prometheus/common/model",
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d" "Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
}, },
{ {
"ImportPath": "github.com/prometheus/procfs", "ImportPath": "github.com/prometheus/procfs",
"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d" "Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud", "ImportPath": "github.com/rackspace/gophercloud",
@ -1927,8 +1947,8 @@
}, },
{ {
"ImportPath": "github.com/russross/blackfriday", "ImportPath": "github.com/russross/blackfriday",
"Comment": "v1.2-42-g77efab5", "Comment": "v1.4-2-g300106c",
"Rev": "77efab57b2f74dd3f9051c79752b2e8995c8b789" "Rev": "300106c228d52c8941d4b3de6054a6062a86dda3"
}, },
{ {
"ImportPath": "github.com/samuel/go-zookeeper/zk", "ImportPath": "github.com/samuel/go-zookeeper/zk",
@ -2004,11 +2024,11 @@
}, },
{ {
"ImportPath": "github.com/ugorji/go/codec", "ImportPath": "github.com/ugorji/go/codec",
"Rev": "f4485b318aadd133842532f841dc205a8e339d74" "Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
}, },
{ {
"ImportPath": "github.com/ugorji/go/codec/codecgen", "ImportPath": "github.com/ugorji/go/codec/codecgen",
"Rev": "f4485b318aadd133842532f841dc205a8e339d74" "Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
}, },
{ {
"ImportPath": "github.com/vishvananda/netlink", "ImportPath": "github.com/vishvananda/netlink",
@ -2200,7 +2220,7 @@
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d" "Rev": "9c60d1c508f5134d1ca726b4641db998f2523357"
}, },
{ {
"ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
@ -2240,35 +2260,48 @@
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
},
{
"ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Rev": "933601d8cd6418a8a891bd9075a7161b0a67badb" "Comment": "v1.0.0-6-g02fca89",
"Rev": "02fca896ff5f50c6bbbee0860345a49344b37a03"
}, },
{ {
"ImportPath": "gopkg.in/gcfg.v1", "ImportPath": "gopkg.in/gcfg.v1",
@ -2302,7 +2335,7 @@
}, },
{ {
"ImportPath": "gopkg.in/yaml.v2", "ImportPath": "gopkg.in/yaml.v2",
"Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
}, },
{ {
"ImportPath": "k8s.io/heapster/metrics/api/v1/types", "ImportPath": "k8s.io/heapster/metrics/api/v1/types",

Godeps/LICENSES (generated): diff suppressed because it is too large


@ -1,4 +1,4 @@
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
==== ====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
@ -427,6 +427,8 @@ db.View(func(tx *bolt.Tx) error {
}) })
``` ```
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
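A quick way to see that caveat in action — this is a minimal sketch, not taken from the Bolt README; the timestamps and the zero-padded layout string are chosen purely for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

func main() {
	earlier := time.Date(2016, 7, 22, 12, 0, 0, 0, time.UTC)
	later := earlier.Add(500 * time.Millisecond)

	// RFC3339Nano trims trailing zeros (and drops the fraction entirely when it
	// is zero), so the earlier key ends in "Z" where the later key has ".5Z",
	// and 'Z' > '.' as bytes.
	a := []byte(earlier.Format(time.RFC3339Nano)) // "2016-07-22T12:00:00Z"
	b := []byte(later.Format(time.RFC3339Nano))   // "2016-07-22T12:00:00.5Z"
	fmt.Println(bytes.Compare(a, b)) // 1: the earlier time sorts *after* the later one

	// A fixed-width fraction keeps byte order identical to time order, which is
	// what you want for Bolt keys that should iterate chronologically.
	const fixedWidth = "2006-01-02T15:04:05.000000000Z07:00"
	a = []byte(earlier.Format(fixedWidth)) // "2016-07-22T12:00:00.000000000Z"
	b = []byte(later.Format(fixedWidth))   // "2016-07-22T12:00:00.500000000Z"
	fmt.Println(bytes.Compare(a, b)) // -1: earlier sorts first, as expected
}
```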
#### ForEach() #### ForEach()
@ -835,6 +837,14 @@ Below is a list of public, open source projects that use Bolt:
backed by boltdb. backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans. simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service * [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
If you are using Bolt in a project please send a pull request to add it to the list. If you are using Bolt in a project please send a pull request to add it to the list.

vendor/github.com/boltdb/bolt/appveyor.yml (new vendored file)

@ -0,0 +1,18 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...

vendor/github.com/boltdb/bolt/bolt_ppc.go (new vendored file)

@ -0,0 +1,9 @@
// +build ppc

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64.go (new vendored file)

@ -0,0 +1,9 @@
// +build ppc64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF


@ -11,7 +11,7 @@ import (
) )
// flock acquires an advisory lock on a file descriptor. // flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error { func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time var t time.Time
for { for {
// If we're beyond our timeout then return an error. // If we're beyond our timeout then return an error.
@ -27,7 +27,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} }
// Otherwise attempt to obtain an exclusive lock. // Otherwise attempt to obtain an exclusive lock.
err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
if err == nil { if err == nil {
return nil return nil
} else if err != syscall.EWOULDBLOCK { } else if err != syscall.EWOULDBLOCK {
@ -40,8 +40,8 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} }
// funlock releases an advisory lock on a file descriptor. // funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error { func funlock(db *DB) error {
return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
} }
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.


@ -11,7 +11,7 @@ import (
) )
// flock acquires an advisory lock on a file descriptor. // flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error { func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time var t time.Time
for { for {
// If we're beyond our timeout then return an error. // If we're beyond our timeout then return an error.
@ -32,7 +32,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} else { } else {
lock.Type = syscall.F_RDLCK lock.Type = syscall.F_RDLCK
} }
err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock) err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
if err == nil { if err == nil {
return nil return nil
} else if err != syscall.EAGAIN { } else if err != syscall.EAGAIN {
@ -45,13 +45,13 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} }
// funlock releases an advisory lock on a file descriptor. // funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error { func funlock(db *DB) error {
var lock syscall.Flock_t var lock syscall.Flock_t
lock.Start = 0 lock.Start = 0
lock.Len = 0 lock.Len = 0
lock.Type = syscall.F_UNLCK lock.Type = syscall.F_UNLCK
lock.Whence = 0 lock.Whence = 0
return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock) return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
} }
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.


@ -16,6 +16,8 @@ var (
) )
const ( const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2 flagLockExclusive = 2
flagLockFailImmediately = 1 flagLockFailImmediately = 1
@ -46,7 +48,16 @@ func fdatasync(db *DB) error {
} }
// flock acquires an advisory lock on a file descriptor. // flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error { func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time var t time.Time
for { for {
// If we're beyond our timeout then return an error. // If we're beyond our timeout then return an error.
@ -62,7 +73,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
flag |= flagLockExclusive flag |= flagLockExclusive
} }
err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil { if err == nil {
return nil return nil
} else if err != errLockViolation { } else if err != errLockViolation {
@ -75,8 +86,11 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
} }
// funlock releases an advisory lock on a file descriptor. // funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error { func funlock(db *DB) error {
return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{}) err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
return err
} }
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.

vendor/github.com/boltdb/bolt/db.go (vendored)

@ -36,6 +36,9 @@ const (
DefaultAllocSize = 16 * 1024 * 1024 DefaultAllocSize = 16 * 1024 * 1024
) )
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()
// DB represents a collection of buckets persisted to a file on disk. // DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB. // All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@ -93,6 +96,7 @@ type DB struct {
path string path string
file *os.File file *os.File
lockfile *os.File // windows only
dataref []byte // mmap'ed readonly, write throws SEGV dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte data *[maxMapSize]byte
datasz int datasz int
@ -106,6 +110,8 @@ type DB struct {
freelist *freelist freelist *freelist
stats Stats stats Stats
pagePool sync.Pool
batchMu sync.Mutex batchMu sync.Mutex
batch *batch batch *batch
@ -177,7 +183,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// if !options.ReadOnly. // if !options.ReadOnly.
// The database file is locked using the shared lock (more than one process may // The database file is locked using the shared lock (more than one process may
// hold a lock at the same time) otherwise (options.ReadOnly is set). // hold a lock at the same time) otherwise (options.ReadOnly is set).
if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
_ = db.close() _ = db.close()
return nil, err return nil, err
} }
@ -199,11 +205,26 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if _, err := db.file.ReadAt(buf[:], 0); err == nil { if _, err := db.file.ReadAt(buf[:], 0); err == nil {
m := db.pageInBuffer(buf[:], 0).meta() m := db.pageInBuffer(buf[:], 0).meta()
if err := m.validate(); err != nil { if err := m.validate(); err != nil {
return nil, err // If we can't read the page size, we can assume it's the same
} // as the OS -- since that's how the page size was chosen in the
// first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
db.pageSize = os.Getpagesize()
} else {
db.pageSize = int(m.pageSize) db.pageSize = int(m.pageSize)
} }
} }
}
// Initialize page pool.
db.pagePool = sync.Pool{
New: func() interface{} {
return make([]byte, db.pageSize)
},
}
// Memory map the data file. // Memory map the data file.
if err := db.mmap(options.InitialMmapSize); err != nil { if err := db.mmap(options.InitialMmapSize); err != nil {
@ -261,12 +282,13 @@ func (db *DB) mmap(minsz int) error {
db.meta0 = db.page(0).meta() db.meta0 = db.page(0).meta()
db.meta1 = db.page(1).meta() db.meta1 = db.page(1).meta()
// Validate the meta pages. // Validate the meta pages. We only return an error if both meta pages fail
if err := db.meta0.validate(); err != nil { // validation, since meta0 failing validation means that it wasn't saved
return err // properly -- but we can recover using meta1. And vice-versa.
} err0 := db.meta0.validate()
if err := db.meta1.validate(); err != nil { err1 := db.meta1.validate()
return err if err0 != nil && err1 != nil {
return err0
} }
return nil return nil
@ -338,6 +360,7 @@ func (db *DB) init() error {
m.root = bucket{root: 3} m.root = bucket{root: 3}
m.pgid = 4 m.pgid = 4
m.txid = txid(i) m.txid = txid(i)
m.checksum = m.sum64()
} }
// Write an empty freelist at page 3. // Write an empty freelist at page 3.
@ -379,10 +402,13 @@ func (db *DB) Close() error {
} }
func (db *DB) close() error { func (db *DB) close() error {
if !db.opened {
return nil
}
db.opened = false db.opened = false
db.freelist = nil db.freelist = nil
db.path = ""
// Clear ops. // Clear ops.
db.ops.writeAt = nil db.ops.writeAt = nil
@ -397,7 +423,7 @@ func (db *DB) close() error {
// No need to unlock read-only file. // No need to unlock read-only file.
if !db.readOnly { if !db.readOnly {
// Unlock the file. // Unlock the file.
if err := funlock(db.file); err != nil { if err := funlock(db); err != nil {
log.Printf("bolt.Close(): funlock error: %s", err) log.Printf("bolt.Close(): funlock error: %s", err)
} }
} }
@ -409,6 +435,7 @@ func (db *DB) close() error {
db.file = nil db.file = nil
} }
db.path = ""
return nil return nil
} }
@ -773,16 +800,37 @@ func (db *DB) pageInBuffer(b []byte, id pgid) *page {
// meta retrieves the current meta page reference. // meta retrieves the current meta page reference.
func (db *DB) meta() *meta { func (db *DB) meta() *meta {
if db.meta0.txid > db.meta1.txid { // We have to return the meta with the highest txid which doesn't fail
return db.meta0 // validation. Otherwise, we can cause errors when in fact the database is
// in a consistent state. metaA is the one with the higher txid.
metaA := db.meta0
metaB := db.meta1
if db.meta1.txid > db.meta0.txid {
metaA = db.meta1
metaB = db.meta0
} }
return db.meta1
// Use higher meta page if valid. Otherwise fallback to previous, if valid.
if err := metaA.validate(); err == nil {
return metaA
} else if err := metaB.validate(); err == nil {
return metaB
}
// This should never be reached, because both meta1 and meta0 were validated
// on mmap() and we do fsync() on every write.
panic("bolt.DB.meta(): invalid meta pages")
} }
// allocate returns a contiguous block of memory starting at a given page. // allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) { func (db *DB) allocate(count int) (*page, error) {
// Allocate a temporary buffer for the page. // Allocate a temporary buffer for the page.
buf := make([]byte, count*db.pageSize) var buf []byte
if count == 1 {
buf = db.pagePool.Get().([]byte)
} else {
buf = make([]byte, count*db.pageSize)
}
p := (*page)(unsafe.Pointer(&buf[0])) p := (*page)(unsafe.Pointer(&buf[0]))
p.overflow = uint32(count - 1) p.overflow = uint32(count - 1)
@ -824,9 +872,11 @@ func (db *DB) grow(sz int) error {
// Truncate and fsync to ensure file size metadata is flushed. // Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284 // https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly { if !db.NoGrowSync && !db.readOnly {
if runtime.GOOS != "windows" {
if err := db.file.Truncate(int64(sz)); err != nil { if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err) return fmt.Errorf("file resize error: %s", err)
} }
}
if err := db.file.Sync(); err != nil { if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err) return fmt.Errorf("file sync error: %s", err)
} }
@ -930,12 +980,12 @@ type meta struct {
// validate checks the marker bytes and version of the meta page to ensure it matches this binary. // validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *meta) validate() error { func (m *meta) validate() error {
if m.checksum != 0 && m.checksum != m.sum64() { if m.magic != magic {
return ErrChecksum
} else if m.magic != magic {
return ErrInvalid return ErrInvalid
} else if m.version != version { } else if m.version != version {
return ErrVersionMismatch return ErrVersionMismatch
} else if m.checksum != 0 && m.checksum != m.sum64() {
return ErrChecksum
} }
return nil return nil
} }


@ -12,7 +12,8 @@ var (
// already open. // already open.
ErrDatabaseOpen = errors.New("database already open") ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when a data file is not a Bolt-formatted database. // ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database") ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a // ErrVersionMismatch is returned when the data file was created with a


@ -463,43 +463,6 @@ func (n *node) rebalance() {
target = n.prevSibling() target = n.prevSibling()
} }
// If target node has extra nodes then just move one over.
if target.numChildren() > target.minKeys() {
if useNextSibling {
// Reparent and move node.
if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
n.inodes = append(n.inodes, target.inodes[0])
target.inodes = target.inodes[1:]
// Update target key on parent.
target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
target.key = target.inodes[0].key
_assert(len(target.key) > 0, "rebalance(1): zero-length node key")
} else {
// Reparent and move node.
if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
n.inodes = append(n.inodes, inode{})
copy(n.inodes[1:], n.inodes)
n.inodes[0] = target.inodes[len(target.inodes)-1]
target.inodes = target.inodes[:len(target.inodes)-1]
}
// Update parent key for node.
n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
n.key = n.inodes[0].key
_assert(len(n.key) > 0, "rebalance(2): zero-length node key")
return
}
// If both this node and the target node are too small then merge them. // If both this node and the target node are too small then merge them.
if useNextSibling { if useNextSibling {
// Reparent all child nodes being moved. // Reparent all child nodes being moved.


@ -111,13 +111,13 @@ type leafPageElement struct {
// key returns a byte slice of the node key. // key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte { func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
} }
// value returns a byte slice of the node value. // value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte { func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
} }
// PageInfo represents human readable information about a page. // PageInfo represents human readable information about a page.

vendor/github.com/boltdb/bolt/tx.go (vendored)

@ -5,6 +5,7 @@ import (
"io" "io"
"os" "os"
"sort" "sort"
"strings"
"time" "time"
"unsafe" "unsafe"
) )
@ -202,8 +203,17 @@ func (tx *Tx) Commit() error {
// If strict mode is enabled then perform a consistency check. // If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic. // Only the first consistency error is reported in the panic.
if tx.db.StrictMode { if tx.db.StrictMode {
if err, ok := <-tx.Check(); ok { ch := tx.Check()
panic("check fail: " + err.Error()) var errs []string
for {
err, ok := <-ch
if !ok {
break
}
errs = append(errs, err.Error())
}
if len(errs) > 0 {
panic("check fail: " + strings.Join(errs, "\n"))
} }
} }
@ -297,12 +307,34 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
} }
defer func() { _ = f.Close() }() defer func() { _ = f.Close() }()
// Copy the meta pages. // Generate a meta page. We use the same page data for both meta pages.
tx.db.metalock.Lock() buf := make([]byte, tx.db.pageSize)
n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) page := (*page)(unsafe.Pointer(&buf[0]))
tx.db.metalock.Unlock() page.flags = metaPageFlag
*page.meta() = *tx.meta
// Write meta 0.
page.id = 0
page.meta().checksum = page.meta().sum64()
nn, err := w.Write(buf)
n += int64(nn)
if err != nil { if err != nil {
return n, fmt.Errorf("meta copy: %s", err) return n, fmt.Errorf("meta 0 copy: %s", err)
}
// Write meta 1 with a lower transaction id.
page.id = 1
page.meta().txid -= 1
page.meta().checksum = page.meta().sum64()
nn, err = w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 1 copy: %s", err)
}
// Move past the meta pages in the file.
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
return n, fmt.Errorf("seek: %s", err)
} }
// Copy data pages. // Copy data pages.
@ -441,6 +473,8 @@ func (tx *Tx) write() error {
for _, p := range tx.pages { for _, p := range tx.pages {
pages = append(pages, p) pages = append(pages, p)
} }
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages) sort.Sort(pages)
// Write pages to disk in order. // Write pages to disk in order.
@ -485,8 +519,22 @@ func (tx *Tx) write() error {
} }
} }
// Clear out page cache. // Put small pages back to page pool.
tx.pages = make(map[pgid]*page) for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil return nil
} }


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,8 +19,8 @@ import (
"sync" "sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/pkg/capnslog" "github.com/coreos/pkg/capnslog"
) )


@ -10,6 +10,7 @@
It has these top-level messages: It has these top-level messages:
User User
Permission
Role Role
*/ */
package authpb package authpb
@ -17,7 +18,7 @@ package authpb
import ( import (
"fmt" "fmt"
proto "github.com/gogo/protobuf/proto" proto "github.com/golang/protobuf/proto"
math "math" math "math"
) )
@ -29,29 +30,74 @@ var _ = proto.Marshal
var _ = fmt.Errorf var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Permission_Type int32
const (
READ Permission_Type = 0
WRITE Permission_Type = 1
READWRITE Permission_Type = 2
)
var Permission_Type_name = map[int32]string{
0: "READ",
1: "WRITE",
2: "READWRITE",
}
var Permission_Type_value = map[string]int32{
"READ": 0,
"WRITE": 1,
"READWRITE": 2,
}
func (x Permission_Type) String() string {
return proto.EnumName(Permission_Type_name, int32(x))
}
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
// User is a single entry in the bucket authUsers // User is a single entry in the bucket authUsers
type User struct { type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Tombstone int64 `protobuf:"varint,3,opt,name=tombstone,proto3" json:"tombstone,omitempty"` Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
} }
func (m *User) Reset() { *m = User{} } func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) } func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {} func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
// Permission is a single entity
type Permission struct {
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}
func (m *Permission) Reset() { *m = Permission{} }
func (m *Permission) String() string { return proto.CompactTextString(m) }
func (*Permission) ProtoMessage() {}
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
// Role is a single entry in the bucket authRoles // Role is a single entry in the bucket authRoles
type Role struct { type Role struct {
Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
} }
func (m *Role) Reset() { *m = Role{} } func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) } func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {} func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
func init() { func init() {
proto.RegisterType((*User)(nil), "authpb.User") proto.RegisterType((*User)(nil), "authpb.User")
proto.RegisterType((*Permission)(nil), "authpb.Permission")
proto.RegisterType((*Role)(nil), "authpb.Role") proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
} }
func (m *User) Marshal() (data []byte, err error) { func (m *User) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
@ -68,26 +114,67 @@ func (m *User) MarshalTo(data []byte) (int, error) {
_ = i _ = i
var l int var l int
_ = l _ = l
if m.Name != nil {
if len(m.Name) > 0 { if len(m.Name) > 0 {
data[i] = 0xa data[i] = 0xa
i++ i++
i = encodeVarintAuth(data, i, uint64(len(m.Name))) i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name) i += copy(data[i:], m.Name)
} }
}
if m.Password != nil {
if len(m.Password) > 0 { if len(m.Password) > 0 {
data[i] = 0x12 data[i] = 0x12
i++ i++
i = encodeVarintAuth(data, i, uint64(len(m.Password))) i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password) i += copy(data[i:], m.Password)
} }
} if len(m.Roles) > 0 {
if m.Tombstone != 0 { for _, s := range m.Roles {
data[i] = 0x18 data[i] = 0x1a
i++ i++
i = encodeVarintAuth(data, i, uint64(m.Tombstone)) l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Permission) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
data[i] = 0x8
i++
i = encodeVarintAuth(data, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
data[i] = 0x1a
i++
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
i += copy(data[i:], m.RangeEnd)
} }
return i, nil return i, nil
} }
@ -107,13 +194,23 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
_ = i _ = i
var l int var l int
_ = l _ = l
if m.Name != nil {
if len(m.Name) > 0 { if len(m.Name) > 0 {
data[i] = 0x12 data[i] = 0xa
i++ i++
i = encodeVarintAuth(data, i, uint64(len(m.Name))) i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name) i += copy(data[i:], m.Name)
} }
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
} }
return i, nil return i, nil
} }
@ -148,20 +245,36 @@ func encodeVarintAuth(data []byte, offset int, v uint64) int {
func (m *User) Size() (n int) { func (m *User) Size() (n int) {
var l int var l int
_ = l _ = l
if m.Name != nil {
l = len(m.Name) l = len(m.Name)
if l > 0 { if l > 0 {
n += 1 + l + sovAuth(uint64(l)) n += 1 + l + sovAuth(uint64(l))
} }
}
if m.Password != nil {
l = len(m.Password) l = len(m.Password)
if l > 0 { if l > 0 {
n += 1 + l + sovAuth(uint64(l)) n += 1 + l + sovAuth(uint64(l))
} }
if len(m.Roles) > 0 {
for _, s := range m.Roles {
l = len(s)
n += 1 + l + sovAuth(uint64(l))
} }
if m.Tombstone != 0 { }
n += 1 + sovAuth(uint64(m.Tombstone)) return n
}
func (m *Permission) Size() (n int) {
var l int
_ = l
if m.PermType != 0 {
n += 1 + sovAuth(uint64(m.PermType))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.RangeEnd)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
} }
return n return n
} }
@ -169,11 +282,15 @@ func (m *User) Size() (n int) {
func (m *Role) Size() (n int) { func (m *Role) Size() (n int) {
var l int var l int
_ = l _ = l
if m.Name != nil {
l = len(m.Name) l = len(m.Name)
if l > 0 { if l > 0 {
n += 1 + l + sovAuth(uint64(l)) n += 1 + l + sovAuth(uint64(l))
} }
if len(m.KeyPermission) > 0 {
for _, e := range m.KeyPermission {
l = e.Size()
n += 1 + l + sovAuth(uint64(l))
}
} }
return n return n
} }
@ -283,10 +400,10 @@ func (m *User) Unmarshal(data []byte) error {
} }
iNdEx = postIndex iNdEx = postIndex
case 3: case 3:
if wireType != 0 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tombstone", wireType) return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
} }
m.Tombstone = 0 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 { if shift >= 64 {
return ErrIntOverflowAuth return ErrIntOverflowAuth
@ -296,11 +413,152 @@ func (m *User) Unmarshal(data []byte) error {
} }
b := data[iNdEx] b := data[iNdEx]
iNdEx++ iNdEx++
m.Tombstone |= (int64(b) & 0x7F) << shift stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 { if b < 0x80 {
break break
} }
} }
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Permission) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
}
m.PermType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:]) skippy, err := skipAuth(data[iNdEx:])
@ -351,7 +609,7 @@ func (m *Role) Unmarshal(data []byte) error {
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
} }
switch fieldNum { switch fieldNum {
case 2: case 1:
if wireType != 2 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
} }
@ -382,6 +640,37 @@ func (m *Role) Unmarshal(data []byte) error {
m.Name = []byte{} m.Name = []byte{}
} }
iNdEx = postIndex iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:]) skippy, err := skipAuth(data[iNdEx:])
@ -507,3 +796,25 @@ var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
) )
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
}


@ -13,10 +13,25 @@ option (gogoproto.goproto_enum_prefix_all) = false;
message User { message User {
bytes name = 1; bytes name = 1;
bytes password = 2; bytes password = 2;
int64 tombstone = 3; repeated string roles = 3;
}
// Permission is a single entity
message Permission {
enum Type {
READ = 0;
WRITE = 1;
READWRITE = 2;
}
Type permType = 1;
bytes key = 2;
bytes range_end = 3;
} }
// Role is a single entry in the bucket authRoles // Role is a single entry in the bucket authRoles
message Role { message Role {
bytes name = 2; bytes name = 1;
repeated Permission keyPermission = 2;
} }
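
The regenerated schema drops the old `tombstone` field: users now carry role names and roles carry key-range permissions. A hedged sketch of how the new generated types compose, using the vendored import path from this tree; the key range and names are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	// A permission names a key range [key, range_end) with a permission type.
	perm := &authpb.Permission{
		PermType: authpb.READWRITE,
		Key:      []byte("foo"),
		RangeEnd: []byte("foo0"),
	}
	role := &authpb.Role{
		Name:          []byte("app-role"),
		KeyPermission: []*authpb.Permission{perm},
	}
	// Users reference roles by name instead of carrying a tombstone.
	user := &authpb.User{
		Name:     []byte("app-user"),
		Password: []byte("<bcrypt hash>"),
		Roles:    []string{"app-role"},
	}
	fmt.Println(user.String(), role.String())
}
```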

16 vendor/github.com/coreos/etcd/auth/doc.go generated vendored Normal file

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package auth provides client role authentication for accessing keys in etcd.
package auth

219 vendor/github.com/coreos/etcd/auth/range_perm_cache.go generated vendored Normal file

@ -0,0 +1,219 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"sort"
"github.com/coreos/etcd/auth/authpb"
"github.com/coreos/etcd/mvcc/backend"
)
// isSubset returns true if a is a subset of b
func isSubset(a, b *rangePerm) bool {
switch {
case len(a.end) == 0 && len(b.end) == 0:
// a, b are both keys
return bytes.Equal(a.begin, b.begin)
case len(b.end) == 0:
// b is a key, a is a range
return false
case len(a.end) == 0:
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.begin, b.end) <= 0
default:
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.end, b.end) <= 0
}
}
func isRangeEqual(a, b *rangePerm) bool {
return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end)
}
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
// TODO(mitake): currently it is O(n^2), we need a better algorithm
newp := make([]*rangePerm, 0)
for i := range perms {
skip := false
for j := range perms {
if i == j {
continue
}
if isRangeEqual(perms[i], perms[j]) {
// if ranges are equal, we only keep the first range.
if i > j {
skip = true
break
}
} else if isSubset(perms[i], perms[j]) {
// if a range is a strict subset of the other one, we skip the subset.
skip = true
break
}
}
if skip {
continue
}
newp = append(newp, perms[i])
}
return newp
}
// mergeRangePerms merges adjacent rangePerms.
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
merged := make([]*rangePerm, 0)
perms = removeSubsetRangePerms(perms)
sort.Sort(RangePermSliceByBegin(perms))
i := 0
for i < len(perms) {
begin, next := i, i
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) != -1 {
next++
}
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
i = next + 1
}
return merged
}
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
user := getUser(tx, userName)
if user == nil {
plog.Errorf("invalid user name %s", userName)
return nil
}
var readPerms, writePerms []*rangePerm
for _, roleName := range user.Roles {
role := getRole(tx, roleName)
if role == nil {
continue
}
for _, perm := range role.KeyPermission {
rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd}
switch perm.PermType {
case authpb.READWRITE:
readPerms = append(readPerms, rp)
writePerms = append(writePerms, rp)
case authpb.READ:
readPerms = append(readPerms, rp)
case authpb.WRITE:
writePerms = append(writePerms, rp)
}
}
}
return &unifiedRangePermissions{
readPerms: mergeRangePerms(readPerms),
writePerms: mergeRangePerms(writePerms),
}
}
func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
var tocheck []*rangePerm
switch permtyp {
case authpb.READ:
tocheck = cachedPerms.readPerms
case authpb.WRITE:
tocheck = cachedPerms.writePerms
default:
plog.Panicf("unknown auth type: %v", permtyp)
}
requiredPerm := &rangePerm{begin: key, end: rangeEnd}
for _, perm := range tocheck {
if isSubset(requiredPerm, perm) {
return true
}
}
return false
}
func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
// assumption: tx is Lock()ed
_, ok := as.rangePermCache[userName]
if !ok {
perms := getMergedPerms(tx, userName)
if perms == nil {
plog.Errorf("failed to create a unified permission of user %s", userName)
return false
}
as.rangePermCache[userName] = perms
}
return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp)
}
func (as *authStore) clearCachedPerm() {
as.rangePermCache = make(map[string]*unifiedRangePermissions)
}
func (as *authStore) invalidateCachedPerm(userName string) {
delete(as.rangePermCache, userName)
}
type unifiedRangePermissions struct {
// readPerms[i] and readPerms[j] (i != j) don't overlap
readPerms []*rangePerm
// writePerms[i] and writePerms[j] (i != j) don't overlap, too
writePerms []*rangePerm
}
type rangePerm struct {
begin, end []byte
}
type RangePermSliceByBegin []*rangePerm
func (slice RangePermSliceByBegin) Len() int {
return len(slice)
}
func (slice RangePermSliceByBegin) Less(i, j int) bool {
switch bytes.Compare(slice[i].begin, slice[j].begin) {
case 0: // begin(i) == begin(j)
return bytes.Compare(slice[i].end, slice[j].end) == -1
case -1: // begin(i) < begin(j)
return true
default:
return false
}
}
func (slice RangePermSliceByBegin) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
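
Because `rangePerm` and `mergeRangePerms` are unexported, here is a standalone sketch of the same interval-merge idea on plain string bounds; the type, function name, and sample ranges are illustrative and not part of the package:

```go
package main

import (
	"fmt"
	"sort"
)

type span struct{ begin, end string }

// merge sorts spans by begin and coalesces any that touch or overlap,
// the same idea mergeRangePerms applies to byte-slice key ranges.
func merge(spans []span) []span {
	sort.Slice(spans, func(i, j int) bool { return spans[i].begin < spans[j].begin })
	var out []span
	for _, s := range spans {
		if n := len(out); n > 0 && out[n-1].end >= s.begin {
			if s.end > out[n-1].end {
				out[n-1].end = s.end
			}
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	perms := []span{{"a", "c"}, {"x", "z"}, {"b", "d"}}
	fmt.Println(merge(perms)) // [{a d} {x z}]
}
```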

55 vendor/github.com/coreos/etcd/auth/simple_token.go generated vendored Normal file

@ -0,0 +1,55 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
// CAUTION: This random number based token mechanism is only for testing purposes.
// JWT based mechanism will be added in the near future.
import (
"crypto/rand"
"math/big"
)
const (
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
defaultSimpleTokenLength = 16
)
func (as *authStore) GenSimpleToken() (string, error) {
ret := make([]byte, defaultSimpleTokenLength)
for i := 0; i < defaultSimpleTokenLength; i++ {
bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
return "", err
}
ret[i] = letters[bInt.Int64()]
}
return string(ret), nil
}
func (as *authStore) assignSimpleTokenToUser(username, token string) {
as.simpleTokensMu.Lock()
_, ok := as.simpleTokens[token]
if ok {
plog.Panicf("token %s is alredy used", token)
}
as.simpleTokens[token] = username
as.simpleTokensMu.Unlock()
}
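
A self-contained sketch of the same crypto/rand token generation, reusing the alphabet and length from the file above; the helper name is illustrative:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// genToken draws each character uniformly with crypto/rand, avoiding the
// modulo bias a naive byte%len(letters) approach would introduce.
func genToken(n int) (string, error) {
	b := make([]byte, n)
	for i := range b {
		idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
		if err != nil {
			return "", err
		}
		b[i] = letters[idx.Int64()]
	}
	return string(b), nil
}

func main() {
	tok, err := genToken(16)
	if err != nil {
		panic(err)
	}
	fmt.Println("simple token:", tok)
}
```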


@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,17 +15,25 @@
package auth package auth
import ( import (
"bytes"
"errors" "errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/coreos/etcd/auth/authpb" "github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage/backend" "github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/pkg/capnslog" "github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
) )
var ( var (
enableFlagKey = []byte("authEnabled") enableFlagKey = []byte("authEnabled")
authEnabled = []byte{1}
authDisabled = []byte{0}
authBucketName = []byte("auth") authBucketName = []byte("auth")
authUsersBucketName = []byte("authUsers") authUsersBucketName = []byte("authUsers")
@ -33,14 +41,32 @@ var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
ErrRootUserNotExist = errors.New("auth: root user does not exist")
ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
ErrUserAlreadyExist = errors.New("auth: user already exists") ErrUserAlreadyExist = errors.New("auth: user already exists")
ErrUserNotFound = errors.New("auth: user not found") ErrUserNotFound = errors.New("auth: user not found")
ErrRoleAlreadyExist = errors.New("auth: role already exists") ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found")
ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
ErrPermissionDenied = errors.New("auth: permission denied")
ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
)
const (
rootUser = "root"
rootRole = "root"
) )
type AuthStore interface { type AuthStore interface {
// AuthEnable() turns on the authentication feature // AuthEnable turns on the authentication feature
AuthEnable() AuthEnable() error
// AuthDisable turns off the authentication feature
AuthDisable()
// Authenticate does authentication based on given user name and password
Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
// Recover recovers the state of auth store from the given backend // Recover recovers the state of auth store from the given backend
Recover(b backend.Backend) Recover(b backend.Backend)
@ -54,35 +80,157 @@ type AuthStore interface {
// UserChangePassword changes a password of a user // UserChangePassword changes a password of a user
UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to the user
UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
// UserGet gets the detailed information of a user
UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
// UserRevokeRole revokes a role of a user
UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role // RoleAdd adds a new role
RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role
RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
// RoleGet gets the detailed information of a role
RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
// RoleRevokePermission revokes a permission from a role
RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role
RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
// UserList gets a list of all users
UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
// RoleList gets a list of all roles
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
// UsernameFromToken gets a username from the given Token
UsernameFromToken(token string) (string, bool)
// IsPutPermitted checks put permission of the user
IsPutPermitted(username string, key []byte) bool
// IsRangePermitted checks range permission of the user
IsRangePermitted(username string, key, rangeEnd []byte) bool
// IsDeleteRangePermitted checks delete-range permission of the user
IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
// IsAdminPermitted checks admin permission of the user
IsAdminPermitted(username string) bool
// GenSimpleToken produces a simple random string
GenSimpleToken() (string, error)
} }
type authStore struct { type authStore struct {
be backend.Backend be backend.Backend
enabled bool
enabledMu sync.RWMutex
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
simpleTokensMu sync.RWMutex
simpleTokens map[string]string // token -> username
} }
func (as *authStore) AuthEnable() { func (as *authStore) AuthEnable() error {
value := []byte{1}
b := as.be b := as.be
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, value) defer func() {
tx.Unlock()
b.ForceCommit()
}()
u := getUser(tx, rootUser)
if u == nil {
return ErrRootUserNotExist
}
if !hasRootRole(u) {
return ErrRootRoleNotExist
}
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
as.enabledMu.Lock()
as.enabled = true
as.enabledMu.Unlock()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
plog.Noticef("Authentication enabled")
return nil
}
func (as *authStore) AuthDisable() {
b := as.be
tx := b.BatchTx()
tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
tx.Unlock() tx.Unlock()
b.ForceCommit() b.ForceCommit()
plog.Noticef("Authentication enabled") as.enabledMu.Lock()
as.enabled = false
as.enabledMu.Unlock()
plog.Noticef("Authentication disabled")
}
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
index := ctx.Value("index").(uint64)
simpleToken := ctx.Value("simpleToken").(string)
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, username)
if user == nil {
return nil, ErrAuthFailed
}
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
plog.Noticef("authentication failed, invalid password for user %s", username)
return &pb.AuthenticateResponse{}, ErrAuthFailed
}
token := fmt.Sprintf("%s.%d", simpleToken, index)
as.assignSimpleTokenToUser(username, token)
plog.Infof("authorized %s, token is %s", username, token)
return &pb.AuthenticateResponse{Token: token}, nil
} }
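
Authenticate reads a Raft index and a pre-generated simple token out of the request context and joins them into the issued token. A hedged sketch of the context preparation the caller is expected to perform, with placeholder values:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	// The server layer is expected to stash both values before calling
	// Authenticate; the index and token here are placeholders.
	ctx := context.WithValue(context.Background(), "index", uint64(42))
	ctx = context.WithValue(ctx, "simpleToken", "abcdEFGHijklMNOP")

	index := ctx.Value("index").(uint64)
	simpleToken := ctx.Value("simpleToken").(string)

	// The issued token takes the form "<simpleToken>.<index>", as in the diff above.
	fmt.Printf("%s.%d\n", simpleToken, index)
}
```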
func (as *authStore) Recover(be backend.Backend) { func (as *authStore) Recover(be backend.Backend) {
enabled := false
as.be = be as.be = be
// TODO(mitake): recovery process tx := be.BatchTx()
tx.Lock()
_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
if len(vs) == 1 {
if bytes.Equal(vs[0], authEnabled) {
enabled = true
}
}
tx.Unlock()
as.enabledMu.Lock()
as.enabled = enabled
as.enabledMu.Unlock()
} }
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
plog.Noticef("adding a new user: %s", r.Name)
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost) hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
if err != nil { if err != nil {
plog.Errorf("failed to hash password: %s", err) plog.Errorf("failed to hash password: %s", err)
@ -93,23 +241,17 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0) user := getUser(tx, r.Name)
if len(vs) != 0 { if user != nil {
return &pb.AuthUserAddResponse{}, ErrUserAlreadyExist return nil, ErrUserAlreadyExist
} }
newUser := authpb.User{ newUser := &authpb.User{
Name: []byte(r.Name), Name: []byte(r.Name),
Password: hashed, Password: hashed,
} }
marshaledUser, merr := newUser.Marshal() putUser(tx, newUser)
if merr != nil {
plog.Errorf("failed to marshal a new user data: %s", merr)
return nil, merr
}
tx.UnsafePut(authUsersBucketName, []byte(r.Name), marshaledUser)
plog.Noticef("added a new user: %s", r.Name) plog.Noticef("added a new user: %s", r.Name)
@ -121,12 +263,12 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0) user := getUser(tx, r.Name)
if len(vs) != 1 { if user == nil {
return &pb.AuthUserDeleteResponse{}, ErrUserNotFound return nil, ErrUserNotFound
} }
tx.UnsafeDelete(authUsersBucketName, []byte(r.Name)) delUser(tx, r.Name)
plog.Noticef("deleted a user: %s", r.Name) plog.Noticef("deleted a user: %s", r.Name)
@ -146,36 +288,230 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(r.Name), nil, 0) user := getUser(tx, r.Name)
if len(vs) != 1 { if user == nil {
return &pb.AuthUserChangePasswordResponse{}, ErrUserNotFound return nil, ErrUserNotFound
} }
updatedUser := authpb.User{ updatedUser := &authpb.User{
Name: []byte(r.Name), Name: []byte(r.Name),
Roles: user.Roles,
Password: hashed, Password: hashed,
} }
marshaledUser, merr := updatedUser.Marshal() putUser(tx, updatedUser)
if merr != nil {
plog.Errorf("failed to marshal a new user data: %s", merr)
return nil, merr
}
tx.UnsafePut(authUsersBucketName, []byte(r.Name), marshaledUser)
plog.Noticef("changed a password of a user: %s", r.Name) plog.Noticef("changed a password of a user: %s", r.Name)
return &pb.AuthUserChangePasswordResponse{}, nil return &pb.AuthUserChangePasswordResponse{}, nil
} }
func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, r.User)
if user == nil {
return nil, ErrUserNotFound
}
if r.Role != rootRole {
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
}
idx := sort.SearchStrings(user.Roles, r.Role)
if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
plog.Warningf("user %s is already granted role %s", r.User, r.Role)
return &pb.AuthUserGrantRoleResponse{}, nil
}
user.Roles = append(user.Roles, r.Role)
sort.Sort(sort.StringSlice(user.Roles))
putUser(tx, user)
as.invalidateCachedPerm(r.User)
plog.Noticef("granted role %s to user %s", r.Role, r.User)
return &pb.AuthUserGrantRoleResponse{}, nil
}
func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthUserGetResponse
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
for _, role := range user.Roles {
resp.Roles = append(resp.Roles, role)
}
return &resp, nil
}
func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthUserListResponse
users := getAllUsers(tx)
for _, u := range users {
resp.Users = append(resp.Users, string(u.Name))
}
return &resp, nil
}
func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, r.Name)
if user == nil {
return nil, ErrUserNotFound
}
updatedUser := &authpb.User{
Name: user.Name,
Password: user.Password,
}
for _, role := range user.Roles {
if strings.Compare(role, r.Role) != 0 {
updatedUser.Roles = append(updatedUser.Roles, role)
}
}
if len(updatedUser.Roles) == len(user.Roles) {
return nil, ErrRoleNotGranted
}
putUser(tx, updatedUser)
as.invalidateCachedPerm(r.Name)
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
return &pb.AuthUserRevokeRoleResponse{}, nil
}
func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthRoleGetResponse
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
for _, perm := range role.KeyPermission {
resp.Perm = append(resp.Perm, perm)
}
return &resp, nil
}
func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
var resp pb.AuthRoleListResponse
roles := getAllRoles(tx)
for _, r := range roles {
resp.Roles = append(resp.Roles, string(r.Name))
}
return &resp, nil
}
func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
updatedRole := &authpb.Role{
Name: role.Name,
}
for _, perm := range role.KeyPermission {
if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) {
updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
}
}
if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
return nil, ErrPermissionNotGranted
}
putRole(tx, updatedRole)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
return &pb.AuthRoleRevokePermissionResponse{}, nil
}
func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
// TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles
//
// Assume a case like below:
// create a role r1
// create a user u1 and grant r1 to u1
// delete r1
//
// After this sequence, u1 is still granted the role r1. So if an admin creates a new role with the name r1,
// the new r1 is automatically granted to u1.
// In some cases, it would be confusing. So we need to provide an option for deleting the grant relation
// from all users.
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Role)
if role == nil {
return nil, ErrRoleNotFound
}
delRole(tx, r.Role)
plog.Noticef("deleted role %s", r.Role)
return &pb.AuthRoleDeleteResponse{}, nil
}
func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
_, vs := tx.UnsafeRange(authRolesBucketName, []byte(r.Name), nil, 0) role := getRole(tx, r.Name)
if len(vs) != 0 { if role != nil {
return nil, ErrRoleAlreadyExist return nil, ErrRoleAlreadyExist
} }
@ -183,18 +519,227 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
Name: []byte(r.Name), Name: []byte(r.Name),
} }
marshaledRole, err := newRole.Marshal() putRole(tx, newRole)
if err != nil {
return nil, err
}
tx.UnsafePut(authRolesBucketName, []byte(r.Name), marshaledRole)
plog.Noticef("Role %s is created", r.Name) plog.Noticef("Role %s is created", r.Name)
return &pb.AuthRoleAddResponse{}, nil return &pb.AuthRoleAddResponse{}, nil
} }
func (as *authStore) UsernameFromToken(token string) (string, bool) {
as.simpleTokensMu.RLock()
defer as.simpleTokensMu.RUnlock()
t, ok := as.simpleTokens[token]
return t, ok
}
type permSlice []*authpb.Permission
func (perms permSlice) Len() int {
return len(perms)
}
func (perms permSlice) Less(i, j int) bool {
return bytes.Compare(perms[i].Key, perms[j].Key) < 0
}
func (perms permSlice) Swap(i, j int) {
perms[i], perms[j] = perms[j], perms[i]
}
func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
role := getRole(tx, r.Name)
if role == nil {
return nil, ErrRoleNotFound
}
idx := sort.Search(len(role.KeyPermission), func(i int) bool {
return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0
})
if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
// update existing permission
role.KeyPermission[idx].PermType = r.Perm.PermType
} else {
// append new permission to the role
newPerm := &authpb.Permission{
Key: []byte(r.Perm.Key),
RangeEnd: []byte(r.Perm.RangeEnd),
PermType: r.Perm.PermType,
}
role.KeyPermission = append(role.KeyPermission, newPerm)
sort.Sort(permSlice(role.KeyPermission))
}
putRole(tx, role)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
return &pb.AuthRoleGrantPermissionResponse{}, nil
}
func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
// TODO(mitake): this function would be costly so we need a caching mechanism
if !as.isAuthEnabled() {
return true
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, userName)
if user == nil {
plog.Errorf("invalid user name %s for permission checking", userName)
return false
}
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
return true
}
return false
}
func (as *authStore) IsPutPermitted(username string, key []byte) bool {
return as.isOpPermitted(username, key, nil, authpb.WRITE)
}
func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
}
func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
}
func (as *authStore) IsAdminPermitted(username string) bool {
if !as.isAuthEnabled() {
return true
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
u := getUser(tx, username)
if u == nil {
return false
}
return hasRootRole(u)
}
func getUser(tx backend.BatchTx, username string) *authpb.User {
_, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
if len(vs) == 0 {
return nil
}
user := &authpb.User{}
err := user.Unmarshal(vs[0])
if err != nil {
plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
}
return user
}
func getAllUsers(tx backend.BatchTx) []*authpb.User {
_, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
if len(vs) == 0 {
return nil
}
var users []*authpb.User
for _, v := range vs {
user := &authpb.User{}
err := user.Unmarshal(v)
if err != nil {
plog.Panicf("failed to unmarshal user struct: %s", err)
}
users = append(users, user)
}
return users
}
func putUser(tx backend.BatchTx, user *authpb.User) {
b, err := user.Marshal()
if err != nil {
plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
}
tx.UnsafePut(authUsersBucketName, user.Name, b)
}
func delUser(tx backend.BatchTx, username string) {
tx.UnsafeDelete(authUsersBucketName, []byte(username))
}
func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
_, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
if len(vs) == 0 {
return nil
}
role := &authpb.Role{}
err := role.Unmarshal(vs[0])
if err != nil {
plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
}
return role
}
func getAllRoles(tx backend.BatchTx) []*authpb.Role {
_, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
if len(vs) == 0 {
return nil
}
var roles []*authpb.Role
for _, v := range vs {
role := &authpb.Role{}
err := role.Unmarshal(v)
if err != nil {
plog.Panicf("failed to unmarshal role struct: %s", err)
}
roles = append(roles, role)
}
return roles
}
func putRole(tx backend.BatchTx, role *authpb.Role) {
b, err := role.Marshal()
if err != nil {
plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
}
tx.UnsafePut(authRolesBucketName, []byte(role.Name), b)
}
func delRole(tx backend.BatchTx, rolename string) {
tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
}
func (as *authStore) isAuthEnabled() bool {
as.enabledMu.RLock()
defer as.enabledMu.RUnlock()
return as.enabled
}
func NewAuthStore(be backend.Backend) *authStore { func NewAuthStore(be backend.Backend) *authStore {
tx := be.BatchTx() tx := be.BatchTx()
tx.Lock() tx.Lock()
@ -208,5 +753,15 @@ func NewAuthStore(be backend.Backend) *authStore {
return &authStore{ return &authStore{
be: be, be: be,
simpleTokens: make(map[string]string),
} }
} }
func hasRootRole(u *authpb.User) bool {
for _, r := range u.Roles {
if r == rootRole {
return true
}
}
return false
}
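
The new AuthEnable refuses to turn authentication on until a root user holding the root role exists. A hedged sketch of that bootstrap order against the server-side store; it assumes a `backend.Backend` constructed elsewhere in the server, and the password is a placeholder:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/auth"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/backend"
)

// bootstrapAuth shows the order the new API expects: AuthEnable fails with
// ErrRootUserNotExist / ErrRootRoleNotExist until root holds the root role.
func bootstrapAuth(be backend.Backend) {
	as := auth.NewAuthStore(be)

	if _, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", Password: "rootpw"}); err != nil {
		log.Fatal(err)
	}
	if _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"}); err != nil {
		log.Fatal(err)
	}
	if _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}); err != nil {
		log.Fatal(err)
	}
	if err := as.AuthEnable(); err != nil {
		log.Fatal(err)
	}
}

func main() {
	// A real server builds the backend from its data directory; omitted here
	// because the constructor arguments are server configuration.
}
```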


@ -4,9 +4,11 @@ etcd/client is the Go client library for etcd.
[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) [![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
etcd uses go's `vendor` directory to manage external dependencies. If `client` is imported etcd uses `cmd/vendor` directory to store external dependencies, which are
outside of etcd, simply copy `client` to the `vendor` directory or use tools like godep to to be compiled into etcd release binaries. `client` can be imported without
manage your own dependency, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). vendoring. For full compatibility, it is recommended to vendor builds using
etcd's vendored packages, using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
## Install ## Install


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -36,6 +36,12 @@ type User struct {
Revoke []string `json:"revoke,omitempty"` Revoke []string `json:"revoke,omitempty"`
} }
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct { type UserRoles struct {
User string `json:"user"` User string `json:"user"`
Roles []Role `json:"roles"` Roles []Role `json:"roles"`
@ -194,7 +200,7 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
} }
var userList struct { var userList struct {
Users []User `json:"users"` Users []userListEntry `json:"users"`
} }
if err = json.Unmarshal(body, &userList); err != nil { if err = json.Unmarshal(body, &userList); err != nil {


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -37,6 +37,10 @@ var (
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks") errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
) )
var DefaultRequestTimeout = 5 * time.Second var DefaultRequestTimeout = 5 * time.Second
@ -335,6 +339,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
var body []byte var body []byte
var err error var err error
cerr := &ClusterError{} cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ { for i := pinned; i < leps+pinned; i++ {
k := i % leps k := i % leps
@ -348,6 +353,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded { if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err return nil, nil, err
} }
if isOneShot {
return nil, nil, err
}
continue continue
} }
if resp.StatusCode/100 == 5 { if resp.StatusCode/100 == 5 {
@ -358,6 +366,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default: default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
} }
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue continue
} }
if k != pinned { if k != pinned {


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -337,7 +337,11 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
act.Dir = opts.Dir act.Dir = opts.Dir
} }
resp, body, err := k.client.Do(ctx, act) doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -385,7 +389,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
act.Recursive = opts.Recursive act.Recursive = opts.Recursive
} }
resp, body, err := k.client.Do(ctx, act) doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil { if err != nil {
return nil, err return nil, err
} }
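
The client changes above mark non-idempotent requests (Set with PrevNoExist, Delete) as one-shot by stashing a pointer-keyed sentinel in the context, so Do() fails fast instead of retrying other endpoints. A minimal sketch of that pattern; the function names here are illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// Using the address of a package-level variable as the key guarantees the
// sentinel cannot collide with context values set by other packages.
var oneShotCtxValue interface{}

func do(ctx context.Context) {
	if ctx.Value(&oneShotCtxValue) != nil {
		fmt.Println("one-shot request: fail fast, do not retry other endpoints")
		return
	}
	fmt.Println("retriable request: try the next endpoint on error")
}

func main() {
	do(context.Background())
	do(context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue))
}
```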


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -40,9 +40,11 @@ if err != nil {
// use the response // use the response
``` ```
etcd uses go's `vendor` directory to manage external dependencies. If `clientv3` is imported etcd uses `cmd/vendor` directory to store external dependencies, which are
outside of etcd, simply copy `clientv3` to the `vendor` directory or use tools like godep to to be compiled into etcd release binaries. `client` can be imported without
manage your own dependency, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). vendoring. For full compatibility, it is recommended to vendor builds using
etcd's vendored packages, using tools like godep, as in
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
## Error Handling ## Error Handling
@ -50,21 +52,22 @@ For more detail, please read [Go vendor design](https://golang.org/s/go15vendor)
etcd client returns 2 types of errors: etcd client returns 2 types of errors:
1. context error: canceled or deadline exceeded. 1. context error: canceled or deadline exceeded.
2. gRPC error: see [v3rpc/error](https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go). 2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
Here is the example code to handle client errors: Here is the example code to handle client errors:
```go ```go
resp, err := kvc.Put(ctx, "", "") resp, err := kvc.Put(ctx, "", "")
if err != nil { if err != nil {
if err == context.Canceled { switch err {
// ctx is canceled by another routine case context.Canceled:
} else if err == context.DeadlineExceeded { log.Fatalf("ctx is canceled by another routine: %v", err)
// ctx is attached with a deadline and it exceeded case context.DeadlineExceeded:
} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { log.Fatalf("ctx is attached with a deadline and it exceeded: %v", err)
// process (verr.Errors) case rpctypes.ErrEmptyKey:
} else { log.Fatalf("client-side error: %v", err)
// bad cluster endpoints, which are not etcd servers default:
log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
} }
} }
``` ```


@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,6 +15,10 @@
package clientv3 package clientv3
import ( import (
"fmt"
"strings"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -22,16 +26,38 @@ import (
type ( type (
AuthEnableResponse pb.AuthEnableResponse AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
) )
type Auth interface { type Auth interface {
// AuthEnable enables auth of an etcd cluster. // AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error) AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster. // UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
@ -41,8 +67,35 @@ type Auth interface {
// UserChangePassword changes a password of a user. // UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// RoleAdd adds a new user to an etcd cluster. // UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets a detailed information of a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role of a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information of a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
} }
type auth struct { type auth struct {
@ -62,26 +115,115 @@ func NewAuth(c *Client) Auth {
} }
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}) resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
return (*AuthEnableResponse)(resp), err return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
return (*AuthDisableResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthUserAddResponse)(resp), err return (*AuthUserAddResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserDeleteResponse)(resp), err return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthUserChangePasswordResponse)(resp), err return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, grpc.FailFast(false))
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, grpc.FailFast(false))
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, grpc.FailFast(false))
return (*AuthRoleAddResponse)(resp), err return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, grpc.FailFast(false))
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, grpc.FailFast(false))
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}, nil
} }

vendor/github.com/coreos/etcd/clientv3/balancer.go

@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// eps are the client's endpoints stripped of any URL scheme
eps []string
ch chan []grpc.Address
numGets uint32
}
func newSimpleBalancer(eps []string) grpc.Balancer {
ch := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
ch <- addrs
return &simpleBalancer{eps: eps, ch: ch}
}
func (b *simpleBalancer) Start(target string) error { return nil }
func (b *simpleBalancer) Up(addr grpc.Address) func(error) { return func(error) {} }
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
v := atomic.AddUint32(&b.numGets, 1)
ep := b.eps[v%uint32(len(b.eps))]
return grpc.Address{Addr: getHost(ep)}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.ch }
func (b *simpleBalancer) Close() error {
close(b.ch)
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}
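
simpleBalancer hands every configured endpoint to gRPC once through Notify and then serves Get by round-robin over the endpoint list. A standalone sketch of that selection pattern (the vendored type itself is unexported):

```
package example

import "sync/atomic"

// roundRobin mirrors simpleBalancer.Get: each call atomically bumps a counter
// and picks the next endpoint modulo the list length, so requests spread
// evenly across endpoints without any locking.
type roundRobin struct {
	numGets uint32
	eps     []string
}

func (r *roundRobin) next() string {
	v := atomic.AddUint32(&r.numGets, 1)
	return r.eps[v%uint32(len(r.eps))]
}
```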


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,18 +15,22 @@
package clientv3 package clientv3
import ( import (
"crypto/tls"
"errors" "errors"
"fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"net" "net"
"net/url" "net/url"
"strings" "strings"
"sync"
"time" "time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
) )
var ( var (
@ -44,19 +48,19 @@ type Client struct {
conn *grpc.ClientConn conn *grpc.ClientConn
cfg Config cfg Config
creds *credentials.TransportAuthenticator creds *credentials.TransportCredentials
mu sync.RWMutex // protects connection selection and error list
errors []error // errors passed to retryConnection
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
} }
// New creates a new etcdv3 client from a given configuration. // New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) { func New(cfg Config) (*Client, error) {
if cfg.RetryDialer == nil {
cfg.RetryDialer = dialEndpointList
}
if len(cfg.Endpoints) == 0 { if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints return nil, ErrNoAvailableEndpoints
} }
@ -80,17 +84,8 @@ func NewFromConfigFile(path string) (*Client, error) {
// Close shuts down the client's etcd connections. // Close shuts down the client's etcd connections.
func (c *Client) Close() error { func (c *Client) Close() error {
c.mu.Lock()
if c.cancel == nil {
c.mu.Unlock()
return nil
}
c.cancel() c.cancel()
c.cancel = nil return toErr(c.ctx, c.conn.Close())
c.mu.Unlock()
c.Watcher.Close()
c.Lease.Close()
return c.conn.Close()
} }
// Ctx is a context for "out of band" messages (e.g., for sending // Ctx is a context for "out of band" messages (e.g., for sending
@ -101,72 +96,157 @@ func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client. // Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints } func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
// Errors returns all errors that have been observed since called last. type authTokenCredential struct {
func (c *Client) Errors() (errs []error) { token string
c.mu.Lock()
defer c.mu.Unlock()
errs = c.errors
c.errors = nil
return errs
} }
// Dial establishes a connection for a given endpoint using the client's config func (cred authTokenCredential) RequireTransportSecurity() bool {
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
return map[string]string{
"token": cred.token,
}, nil
}
func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
proto = "tcp"
host = endpoint
creds = c.creds
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
}
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "unix":
proto = "unix"
case "http":
creds = nil
case "https":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
return "", "", nil
}
return
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) []grpc.DialOption {
opts := []grpc.DialOption{ opts := []grpc.DialOption{
grpc.WithBlock(), grpc.WithBlock(),
grpc.WithTimeout(c.cfg.DialTimeout), grpc.WithTimeout(c.cfg.DialTimeout),
} }
if c.creds != nil { opts = append(opts, dopts...)
opts = append(opts, grpc.WithTransportCredentials(*c.creds))
} else { // grpc issues TLS cert checks using the string passed into dial so
opts = append(opts, grpc.WithInsecure()) // that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep := make(map[string]string)
for i := range c.cfg.Endpoints {
_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
host2ep[host] = c.cfg.Endpoints[i]
} }
proto := "tcp" f := func(host string, t time.Duration) (net.Conn, error) {
if url, uerr := url.Parse(endpoint); uerr == nil && url.Scheme == "unix" { proto, host, _ := c.dialTarget(host2ep[host])
proto = "unix" if proto == "" {
// strip unix:// prefix so certs work return nil, fmt.Errorf("unknown scheme for %q", host)
endpoint = url.Host
} }
f := func(a string, t time.Duration) (net.Conn, error) {
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
return nil, c.ctx.Err() return nil, c.ctx.Err()
default: default:
} }
return net.DialTimeout(proto, a, t) return net.DialTimeout(proto, host, t)
} }
opts = append(opts, grpc.WithDialer(f)) opts = append(opts, grpc.WithDialer(f))
conn, err := grpc.Dial(endpoint, opts...) _, _, creds := c.dialTarget(endpoint)
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
return opts
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
// use dial options without dopts to avoid reusing the client balancer
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
return nil, err
}
defer auth.close()
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
}
conn, err := grpc.Dial(host, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return conn, nil return conn, nil
} }
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) { func newClient(cfg *Config) (*Client, error) {
if cfg == nil { if cfg == nil {
cfg = &Config{RetryDialer: dialEndpointList} cfg = &Config{}
} }
var creds *credentials.TransportAuthenticator var creds *credentials.TransportCredentials
if cfg.TLS != nil { if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS) c := credentials.NewTLS(cfg.TLS)
creds = &c creds = &c
} }
// use a temporary skeleton client to bootstrap first connection // use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO()) ctx, cancel := context.WithCancel(context.TODO())
conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds, ctx: ctx})
if err != nil {
return nil, err
}
client := &Client{ client := &Client{
conn: conn, conn: nil,
cfg: *cfg, cfg: *cfg,
creds: creds, creds: creds,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
} }
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
b := newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(b))
if err != nil {
return nil, err
}
client.conn = conn
client.Cluster = NewCluster(client) client.Cluster = NewCluster(client)
client.KV = NewKV(client) client.KV = NewKV(client)
client.Lease = NewLease(client) client.Lease = NewLease(client)
@ -184,60 +264,35 @@ func newClient(cfg *Config) (*Client, error) {
} }
// ActiveConnection returns the current in-use connection // ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
c.mu.RLock()
defer c.mu.RUnlock()
return c.conn
}
// retryConnection establishes a new connection // isHaltErr returns true if the given error and context indicate no forward
func (c *Client) retryConnection(oldConn *grpc.ClientConn, err error) (*grpc.ClientConn, error) {
c.mu.Lock()
defer c.mu.Unlock()
if err != nil {
c.errors = append(c.errors, err)
}
if c.cancel == nil {
return nil, c.ctx.Err()
}
if oldConn != c.conn {
// conn has already been updated
return c.conn, nil
}
oldConn.Close()
if st, _ := oldConn.State(); st != grpc.Shutdown {
// wait for shutdown so grpc doesn't leak sleeping goroutines
oldConn.WaitForStateChange(c.ctx, st)
}
conn, dialErr := c.cfg.RetryDialer(c)
if dialErr != nil {
c.errors = append(c.errors, dialErr)
return nil, dialErr
}
c.conn = conn
return c.conn, nil
}
// dialEndpointList attempts to connect to each endpoint in order until a
// connection is established.
func dialEndpointList(c *Client) (*grpc.ClientConn, error) {
var err error
for _, ep := range c.Endpoints() {
conn, curErr := c.Dial(ep)
if curErr != nil {
err = curErr
} else {
return conn, nil
}
}
return nil, err
}
// isHalted returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting. // progress can be made, even after reconnecting.
func isHalted(ctx context.Context, err error) bool { func isHaltErr(ctx context.Context, err error) bool {
isRPCError := strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ") if ctx != nil && ctx.Err() != nil {
return isRPCError || ctx.Err() != nil return true
}
if err == nil {
return false
}
eErr := rpctypes.Error(err)
if _, ok := eErr.(rpctypes.EtcdError); ok {
return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
}
// treat etcdserver errors not recognized by the client as halting
return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) ||
strings.Contains(err.Error(), "etcdserver:")
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if ctx.Err() != nil && strings.Contains(err.Error(), "context") {
err = ctx.Err()
} else if strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) {
err = grpc.ErrClientConnClosing
}
return err
} }
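
The client now carries Username/Password, which dial exchanges for a token via the temporary authenticator and attaches to every RPC as per-RPC credentials, and WithRequireLeader marks a context so requests fail when the cluster has no leader. A hedged construction sketch; the endpoint and credentials are placeholders:

```
package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// newAuthedClient dials a placeholder endpoint with password auth and returns
// a context that refuses to be served by a leaderless member.
func newAuthedClient() (*clientv3.Client, context.Context, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
		Username:    "app-user", // token is fetched once at dial time
		Password:    "secret",
	})
	if err != nil {
		return nil, nil, err
	}
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli, ctx, nil
}
```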


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,8 +15,6 @@
package clientv3 package clientv3
import ( import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -34,9 +32,6 @@ type Cluster interface {
// MemberList lists the current cluster membership. // MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error) MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberLeader returns the current leader member.
MemberLeader(ctx context.Context) (*Member, error)
// MemberAdd adds a new member into the cluster. // MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
@ -48,70 +43,47 @@ type Cluster interface {
} }
type cluster struct { type cluster struct {
c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
remote pb.ClusterClient remote pb.ClusterClient
} }
func NewCluster(c *Client) Cluster { func NewCluster(c *Client) Cluster {
conn := c.ActiveConnection() return &cluster{remote: pb.NewClusterClient(c.conn)}
return &cluster{
c: c,
conn: conn,
remote: pb.NewClusterClient(conn),
}
} }
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs} r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberAdd(ctx, r) resp, err := c.remote.MemberAdd(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return (*MemberAddResponse)(resp), nil return (*MemberAddResponse)(resp), nil
} }
if isHaltErr(ctx, err) {
if isHalted(ctx, err) { return nil, toErr(ctx, err)
return nil, err
} }
return nil, toErr(ctx, err)
go c.switchRemote(err)
return nil, err
} }
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id} r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.getRemote().MemberRemove(ctx, r) resp, err := c.remote.MemberRemove(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return (*MemberRemoveResponse)(resp), nil return (*MemberRemoveResponse)(resp), nil
} }
if isHaltErr(ctx, err) {
if isHalted(ctx, err) { return nil, toErr(ctx, err)
return nil, err
} }
return nil, toErr(ctx, err)
go c.switchRemote(err)
return nil, err
} }
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update. // it is safe to retry on update.
for { for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberUpdate(ctx, r) resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return (*MemberUpdateResponse)(resp), nil return (*MemberUpdateResponse)(resp), nil
} }
if isHaltErr(ctx, err) {
if isHalted(ctx, err) { return nil, toErr(ctx, err)
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
} }
} }
} }
@ -119,52 +91,12 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list. // it is safe to retry on list.
for { for {
resp, err := c.getRemote().MemberList(ctx, &pb.MemberListRequest{}) resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil { if err == nil {
return (*MemberListResponse)(resp), nil return (*MemberListResponse)(resp), nil
} }
if isHaltErr(ctx, err) {
if isHalted(ctx, err) { return nil, toErr(ctx, err)
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
} }
} }
} }
func (c *cluster) MemberLeader(ctx context.Context) (*Member, error) {
resp, err := c.MemberList(ctx)
if err != nil {
return nil, err
}
for _, m := range resp.Members {
if m.IsLeader {
return (*Member)(m), nil
}
}
return nil, nil
}
func (c *cluster) getRemote() pb.ClusterClient {
c.mu.Lock()
defer c.mu.Unlock()
return c.remote
}
func (c *cluster) switchRemote(prevErr error) error {
newConn, err := c.c.retryConnection(c.conn, prevErr)
if err != nil {
return err
}
c.mu.Lock()
defer c.mu.Unlock()
c.conn = newConn
c.remote = pb.NewClusterClient(c.conn)
return nil
}
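
With the connection-switching logic gone, the cluster client simply retries safe calls against the shared connection. A small usage sketch, assuming an existing client; the peer URL is a placeholder:

```
package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// listAndGrow prints the current membership and then adds one member with a
// hypothetical peer URL.
func listAndGrow(cli *clientv3.Client) error {
	cluster := clientv3.NewCluster(cli)
	ctx := context.Background()

	resp, err := cluster.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x: %s %v\n", m.ID, m.Name, m.PeerURLs)
	}

	_, err = cluster.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
	return err
}
```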

vendor/github.com/coreos/etcd/clientv3/compact_op.go

@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps slice CompactOption to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
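
CompactOp bundles the target revision and options into the compaction RPC; WithCompactPhysical makes the call wait until compacted entries are physically purged. A sketch that compacts up to the revision observed by a read (the key name is hypothetical):

```
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// compactToCurrent reads one key to learn the current revision, then compacts
// the keyspace history up to it and waits for the physical purge.
func compactToCurrent(cli *clientv3.Client) error {
	kv := clientv3.NewKV(cli)
	ctx := context.Background()

	resp, err := kv.Get(ctx, "app/config") // hypothetical key
	if err != nil {
		return err
	}
	rev := resp.Header.Revision

	_, err = kv.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}
```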


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -22,19 +22,12 @@ import (
"github.com/coreos/etcd/pkg/tlsutil" "github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"google.golang.org/grpc"
) )
// EndpointDialer is a policy for choosing which endpoint to dial next
type EndpointDialer func(*Client) (*grpc.ClientConn, error)
type Config struct { type Config struct {
// Endpoints is a list of URLs // Endpoints is a list of URLs
Endpoints []string Endpoints []string
// RetryDialer chooses the next endpoint to use
RetryDialer EndpointDialer
// DialTimeout is the timeout for failing to establish a connection. // DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration DialTimeout time.Duration
@ -43,9 +36,15 @@ type Config struct {
// Logger is the logger used by client library. // Logger is the logger used by client library.
Logger Logger Logger Logger
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
} }
type YamlConfig struct { type yamlConfig struct {
Endpoints []string `json:"endpoints"` Endpoints []string `json:"endpoints"`
DialTimeout time.Duration `json:"dial-timeout"` DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"` InsecureTransport bool `json:"insecure-transport"`
@ -61,7 +60,7 @@ func configFromFile(fpath string) (*Config, error) {
return nil, err return nil, err
} }
yc := &YamlConfig{} yc := &yamlConfig{}
err = yaml.Unmarshal(b, yc) err = yaml.Unmarshal(b, yc)
if err != nil { if err != nil {

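configFromFile now unmarshals into the unexported yamlConfig, so file-driven clients still go through NewFromConfigFile. A minimal sketch; the path is a placeholder:

```
package example

import "github.com/coreos/etcd/clientv3"

// fromFile builds a client from a YAML file containing keys such as
// "endpoints", "dial-timeout" and "insecure-transport" (see yamlConfig above).
func fromFile() (*clientv3.Client, error) {
	return clientv3.NewFromConfigFile("/etc/etcd/client.yaml") // hypothetical path
}
```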

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,14 +15,13 @@
package clientv3 package clientv3
import ( import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
type ( type (
CompactResponse pb.CompactionResponse
PutResponse pb.PutResponse PutResponse pb.PutResponse
GetResponse pb.RangeResponse GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse DeleteResponse pb.DeleteRangeResponse
@ -50,7 +49,7 @@ type KV interface {
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev. // Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64) error Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction. // Do applies a single Op on KV without a transaction.
// Do is useful when declaring operations to be issued at a later time // Do is useful when declaring operations to be issued at a later time
@ -74,54 +73,39 @@ type OpResponse struct {
del *DeleteResponse del *DeleteResponse
} }
type kv struct { func (op OpResponse) Put() *PutResponse { return op.put }
c *Client func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
mu sync.Mutex // guards all fields type kv struct {
conn *grpc.ClientConn // conn in-use
remote pb.KVClient remote pb.KVClient
} }
func NewKV(c *Client) KV { func NewKV(c *Client) KV {
conn := c.ActiveConnection() return &kv{remote: pb.NewKVClient(c.conn)}
remote := pb.NewKVClient(conn)
return &kv{
conn: c.ActiveConnection(),
remote: remote,
c: c,
}
} }
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...)) r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, err return r.put, toErr(ctx, err)
} }
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...)) r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, err return r.get, toErr(ctx, err)
} }
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...)) r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, err return r.del, toErr(ctx, err)
} }
func (kv *kv) Compact(ctx context.Context, rev int64) error { func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
r := &pb.CompactionRequest{Revision: rev} resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
_, err := kv.getRemote().Compact(ctx, r) if err != nil {
if err == nil { return nil, toErr(ctx, err)
return nil
} }
return (*CompactResponse)(resp), err
if isHalted(ctx, err) {
return err
}
go kv.switchRemote(err)
return err
} }
func (kv *kv) Txn(ctx context.Context) Txn { func (kv *kv) Txn(ctx context.Context) Txn {
@ -133,75 +117,60 @@ func (kv *kv) Txn(ctx context.Context) Txn {
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for { for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error var err error
switch op.t { switch op.t {
// TODO: handle other ops // TODO: handle other ops
case tRange: case tRange:
var resp *pb.RangeResponse var resp *pb.RangeResponse
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable} r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil { if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
} }
resp, err = kv.getRemote().Range(ctx, r) resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil return OpResponse{get: (*GetResponse)(resp)}, nil
} }
case tPut: case tPut:
var resp *pb.PutResponse var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)} r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
resp, err = kv.getRemote().Put(ctx, r) resp, err = kv.remote.Put(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil return OpResponse{put: (*PutResponse)(resp)}, nil
} }
case tDeleteRange: case tDeleteRange:
var resp *pb.DeleteRangeResponse var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end} r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
resp, err = kv.getRemote().DeleteRange(ctx, r) resp, err = kv.remote.DeleteRange(ctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil return OpResponse{del: (*DeleteResponse)(resp)}, nil
} }
default: default:
panic("Unknown op") panic("Unknown op")
} }
if isHalted(ctx, err) {
return OpResponse{}, err return OpResponse{}, err
} }
// do not retry on modifications
if op.isWrite() {
go kv.switchRemote(err)
return OpResponse{}, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return OpResponse{}, nerr
}
}
}
func (kv *kv) switchRemote(prevErr error) error {
// Usually it's a bad idea to lock on network i/o but here it's OK
// since the link is down and new requests can't be processed anyway.
// Likewise, if connecting stalls, closing the Client can break the
// lock via context cancelation.
kv.mu.Lock()
defer kv.mu.Unlock()
newConn, err := kv.c.retryConnection(kv.conn, prevErr)
if err != nil {
return err
}
kv.conn = newConn
kv.remote = pb.NewKVClient(kv.conn)
return nil
}
func (kv *kv) getRemote() pb.KVClient {
kv.mu.Lock()
defer kv.mu.Unlock()
return kv.remote
}
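
The kv client retries read-only operations in Do, converts errors through toErr, and plumbs the new KeysOnly/CountOnly flags into the RangeRequest. A usage sketch of those options, assuming an existing client and a hypothetical "app/" prefix:

```
package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// inspectPrefix counts the keys under "app/" and then lists their names
// without transferring the values.
func inspectPrefix(cli *clientv3.Client) error {
	kv := clientv3.NewKV(cli)
	ctx := context.Background()

	if _, err := kv.Put(ctx, "app/config", "v1"); err != nil {
		return err
	}

	// CountOnly: the response carries only Count, no key-value pairs.
	cnt, err := kv.Get(ctx, "app/", clientv3.WithPrefix(), clientv3.WithCountOnly())
	if err != nil {
		return err
	}
	fmt.Println("keys under app/:", cnt.Count)

	// KeysOnly: Kvs is populated but every Value is left empty.
	keys, err := kv.Get(ctx, "app/", clientv3.WithPrefix(), clientv3.WithKeysOnly())
	if err != nil {
		return err
	}
	for _, kvp := range keys.Kvs {
		fmt.Println(string(kvp.Key))
	}
	return nil
}
```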


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,19 +18,36 @@ import (
"sync" "sync"
"time" "time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
type ( type (
LeaseGrantResponse pb.LeaseGrantResponse
LeaseRevokeResponse pb.LeaseRevokeResponse LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseKeepAliveResponse pb.LeaseKeepAliveResponse
LeaseID int64 LeaseID int64
) )
// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
const ( const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
// a small buffer to store unsent lease responses. // a small buffer to store unsent lease responses.
leaseResponseChSize = 16 leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease. // NoLease is a lease ID for the absence of a lease.
@ -57,10 +74,7 @@ type Lease interface {
} }
type lessor struct { type lessor struct {
c *Client
mu sync.Mutex // guards all fields mu sync.Mutex // guards all fields
conn *grpc.ClientConn // conn in-use
// donec is closed when recvKeepAliveLoop stops // donec is closed when recvKeepAliveLoop stops
donec chan struct{} donec chan struct{}
@ -74,32 +88,38 @@ type lessor struct {
stopCancel context.CancelFunc stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive keepAlives map[LeaseID]*keepAlive
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
} }
// keepAlive multiplexes a keepalive for a lease over multiple channels // keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct { type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context ctxs []context.Context
// deadline is the next time to send a keep alive message // deadline is the time the keep alive channels close if no response
deadline time.Time deadline time.Time
// nextKeepAlive is when to send the next keep alive message
nextKeepAlive time.Time
// donec is closed on lease revoke, expiration, or cancel. // donec is closed on lease revoke, expiration, or cancel.
donec chan struct{} donec chan struct{}
} }
func NewLease(c *Client) Lease { func NewLease(c *Client) Lease {
l := &lessor{ l := &lessor{
c: c,
conn: c.ActiveConnection(),
donec: make(chan struct{}), donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive), keepAlives: make(map[LeaseID]*keepAlive),
remote: pb.NewLeaseClient(c.conn),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
} }
l.remote = pb.NewLeaseClient(l.conn)
l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop() go l.recvKeepAliveLoop()
go l.deadlineLoop()
return l return l
} }
@ -110,14 +130,20 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
for { for {
r := &pb.LeaseGrantRequest{TTL: ttl} r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.getRemote().LeaseGrant(cctx, r) resp, err := l.remote.LeaseGrant(cctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return (*LeaseGrantResponse)(resp), nil gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
} }
if isHalted(cctx, err) { return gresp, nil
return nil, err
} }
if nerr := l.switchRemoteAndStream(err); nerr != nil { if isHaltErr(cctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr return nil, nerr
} }
} }
@ -130,16 +156,15 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
for { for {
r := &pb.LeaseRevokeRequest{ID: int64(id)} r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.getRemote().LeaseRevoke(cctx, r) resp, err := l.remote.LeaseRevoke(cctx, r, grpc.FailFast(false))
if err == nil { if err == nil {
return (*LeaseRevokeResponse)(resp), nil return (*LeaseRevokeResponse)(resp), nil
} }
if isHalted(ctx, err) { if isHaltErr(ctx, err) {
return nil, err return nil, toErr(ctx, err)
} }
if nerr := l.newStream(); nerr != nil {
if nerr := l.switchRemoteAndStream(err); nerr != nil {
return nil, nerr return nil, nerr
} }
} }
@ -155,7 +180,8 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
ka = &keepAlive{ ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch}, chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx}, ctxs: []context.Context{ctx},
deadline: time.Now(), deadline: time.Now().Add(l.firstKeepAliveTimeout),
nextKeepAlive: time.Now(),
donec: make(chan struct{}), donec: make(chan struct{}),
} }
l.keepAlives[id] = ka l.keepAlives[id] = ka
@ -179,11 +205,16 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
for { for {
resp, err := l.keepAliveOnce(cctx, id) resp, err := l.keepAliveOnce(cctx, id)
if err == nil { if err == nil {
if resp.TTL == 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err return resp, err
} }
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
nerr := l.switchRemoteAndStream(err) if nerr := l.newStream(); nerr != nil {
if nerr != nil {
return nil, nerr return nil, nerr
} }
} }
@ -228,26 +259,34 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
} }
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
stream, err := l.getRemote().LeaseKeepAlive(ctx) cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
resp, rerr := stream.Recv() resp, rerr := stream.Recv()
if rerr != nil { if rerr != nil {
return nil, rerr return nil, toErr(ctx, rerr)
} }
return (*LeaseKeepAliveResponse)(resp), nil
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
return karesp, nil
} }
func (l *lessor) recvKeepAliveLoop() { func (l *lessor) recvKeepAliveLoop() {
defer func() { defer func() {
l.stopCancel()
l.mu.Lock() l.mu.Lock()
close(l.donec) close(l.donec)
for _, ka := range l.keepAlives { for _, ka := range l.keepAlives {
@ -261,7 +300,7 @@ func (l *lessor) recvKeepAliveLoop() {
for serr == nil { for serr == nil {
resp, err := stream.Recv() resp, err := stream.Recv()
if err != nil { if err != nil {
if isHalted(l.stopCtx, err) { if isHaltErr(l.stopCtx, err) {
return return
} }
stream, serr = l.resetRecv() stream, serr = l.resetRecv()
@ -273,7 +312,7 @@ func (l *lessor) recvKeepAliveLoop() {
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests // resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
if err := l.switchRemoteAndStream(nil); err != nil { if err := l.newStream(); err != nil {
return nil, err return nil, err
} }
stream := l.getKeepAliveStream() stream := l.getKeepAliveStream()
@ -283,39 +322,68 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse // recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
id := LeaseID(resp.ID) karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
l.mu.Lock() l.mu.Lock()
defer l.mu.Unlock() defer l.mu.Unlock()
ka, ok := l.keepAlives[id] ka, ok := l.keepAlives[karesp.ID]
if !ok { if !ok {
return return
} }
if resp.TTL <= 0 { if karesp.TTL <= 0 {
// lease expired; close all keep alive channels // lease expired; close all keep alive channels
delete(l.keepAlives, id) delete(l.keepAlives, karesp.ID)
ka.Close() ka.Close()
return return
} }
// send update to all channels // send update to all channels
nextDeadline := time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second) nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs { for _, ch := range ka.chs {
select { select {
case ch <- (*LeaseKeepAliveResponse)(resp): case ch <- karesp:
ka.deadline = nextDeadline ka.nextKeepAlive = nextKeepAlive
default: default:
} }
} }
} }
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
for {
select {
case <-time.After(time.Second):
case <-l.donec:
return
}
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream // sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for { for {
select { select {
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec: case <-l.donec:
return return
case <-l.stopCtx.Done(): case <-l.stopCtx.Done():
@ -327,7 +395,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
now := time.Now() now := time.Now()
l.mu.Lock() l.mu.Lock()
for id, ka := range l.keepAlives { for id, ka := range l.keepAlives {
if ka.deadline.Before(now) { if ka.nextKeepAlive.Before(now) {
tosend = append(tosend, id) tosend = append(tosend, id)
} }
} }
@ -343,57 +411,18 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
} }
} }
func (l *lessor) getRemote() pb.LeaseClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.remote
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient { func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock() l.mu.Lock()
defer l.mu.Unlock() defer l.mu.Unlock()
return l.stream return l.stream
} }
func (l *lessor) switchRemoteAndStream(prevErr error) error {
l.mu.Lock()
conn := l.conn
l.mu.Unlock()
var (
err error
newConn *grpc.ClientConn
)
if prevErr != nil {
conn.Close()
newConn, err = l.c.retryConnection(conn, prevErr)
if err != nil {
return err
}
}
l.mu.Lock()
if newConn != nil {
l.conn = newConn
}
l.remote = pb.NewLeaseClient(l.conn)
l.mu.Unlock()
serr := l.newStream()
if serr != nil {
return serr
}
return nil
}
func (l *lessor) newStream() error { func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx) sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.getRemote().LeaseKeepAlive(sctx) stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err != nil { if err != nil {
cancel() cancel()
return err return toErr(sctx, err)
} }
l.mu.Lock() l.mu.Lock()

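The lessor now keeps two timers per lease: nextKeepAlive (when to send the next refresh) and deadline (when to reap channels that got no response), and Grant/KeepAlive return the new response structs. A usage sketch that binds a key to a lease and drains the keepalive channel; the key and TTL are arbitrary, and WithLease is the standard lease option from op.go:

```
package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// keepSessionAlive grants a 10s lease, binds a key to it, and refreshes it
// until the keepalive channel closes (lease expired, revoked, or client closed).
func keepSessionAlive(cli *clientv3.Client) error {
	lease := clientv3.NewLease(cli)
	kv := clientv3.NewKV(cli)
	ctx := context.Background()

	grant, err := lease.Grant(ctx, 10)
	if err != nil {
		return err
	}
	if _, err := kv.Put(ctx, "app/session", "alive", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}

	ch, err := lease.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	for ka := range ch {
		fmt.Printf("lease %x refreshed, ttl=%d\n", ka.ID, ka.TTL)
	}
	return nil
}
```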

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,7 +15,7 @@
package clientv3 package clientv3
import ( import (
"sync" "io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -45,25 +45,20 @@ type Maintenance interface {
// times with different endpoints. // times with different endpoints.
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the member. // Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error) Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
Snapshot(ctx context.Context) (io.ReadCloser, error)
} }
type maintenance struct { type maintenance struct {
c *Client c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
remote pb.MaintenanceClient remote pb.MaintenanceClient
} }
func NewMaintenance(c *Client) Maintenance { func NewMaintenance(c *Client) Maintenance {
conn := c.ActiveConnection() return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
return &maintenance{
c: c,
conn: conn,
remote: pb.NewMaintenanceClient(conn),
}
} }
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
@ -73,15 +68,12 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
Alarm: pb.AlarmType_NONE, // all Alarm: pb.AlarmType_NONE, // all
} }
for { for {
resp, err := m.getRemote().Alarm(ctx, req) resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil { if err == nil {
return (*AlarmResponse)(resp), nil return (*AlarmResponse)(resp), nil
} }
if isHalted(ctx, err) { if isHaltErr(ctx, err) {
return nil, err return nil, toErr(ctx, err)
}
if err = m.switchRemote(err); err != nil {
return nil, err
} }
} }
} }
@ -96,38 +88,36 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx) ar, err := m.AlarmList(ctx)
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
ret := AlarmResponse{} ret := AlarmResponse{}
for _, am := range ar.Alarms { for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil { if derr != nil {
return nil, derr return nil, toErr(ctx, derr)
} }
ret.Alarms = append(ret.Alarms, dresp.Alarms...) ret.Alarms = append(ret.Alarms, dresp.Alarms...)
} }
return &ret, nil return &ret, nil
} }
resp, err := m.getRemote().Alarm(ctx, req) resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil { if err == nil {
return (*AlarmResponse)(resp), nil return (*AlarmResponse)(resp), nil
} }
if !isHalted(ctx, err) { return nil, toErr(ctx, err)
go m.switchRemote(err)
}
return nil, err
} }
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint) conn, err := m.c.Dial(endpoint)
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
defer conn.Close()
remote := pb.NewMaintenanceClient(conn) remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
return (*DefragmentResponse)(resp), nil return (*DefragmentResponse)(resp), nil
} }
@ -135,30 +125,40 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint) conn, err := m.c.Dial(endpoint)
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
defer conn.Close()
remote := pb.NewMaintenanceClient(conn) remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{}) resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
if err != nil { if err != nil {
return nil, err return nil, toErr(ctx, err)
} }
return (*StatusResponse)(resp), nil return (*StatusResponse)(resp), nil
} }
func (m *maintenance) getRemote() pb.MaintenanceClient { func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
m.mu.Lock() ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
defer m.mu.Unlock() if err != nil {
return m.remote return nil, toErr(ctx, err)
} }
func (m *maintenance) switchRemote(prevErr error) error { pr, pw := io.Pipe()
m.mu.Lock() go func() {
defer m.mu.Unlock() for {
newConn, err := m.c.retryConnection(m.conn, prevErr) resp, err := ss.Recv()
if err != nil { if err != nil {
return err pw.CloseWithError(err)
return
} }
m.conn = newConn if resp == nil && err == nil {
m.remote = pb.NewMaintenanceClient(m.conn) break
return nil }
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return pr, nil
} }
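
Snapshot streams the backend database through an io.Pipe, so callers consume it as a plain io.ReadCloser. A sketch that writes a snapshot to disk; the output path is a placeholder:

```
package example

import (
	"io"
	"os"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// saveSnapshot streams a snapshot of the connected member's backend to a file.
func saveSnapshot(cli *clientv3.Client) error {
	m := clientv3.NewMaintenance(cli)

	rc, err := m.Snapshot(context.Background())
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create("/tmp/etcd-backup.db") // hypothetical path
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, rc) // copies until the server ends the stream
	return err
}
```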


@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -41,6 +41,8 @@ type Op struct {
limit int64 limit int64
sort *SortOption sort *SortOption
serializable bool serializable bool
keysOnly bool
countOnly bool
// for range, watch // for range, watch
rev int64 rev int64
@ -53,21 +55,29 @@ type Op struct {
leaseID LeaseID leaseID LeaseID
} }
func (op Op) toRequestUnion() *pb.RequestUnion { func (op Op) toRequestOp() *pb.RequestOp {
switch op.t { switch op.t {
case tRange: case tRange:
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable} r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil { if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
} }
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestRange{RequestRange: r}} return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
case tPut: case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)} r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestPut{RequestPut: r}} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange: case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end} r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestDeleteRange{RequestDeleteRange: r}} return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
default: default:
panic("Unknown Op") panic("Unknown Op")
} }
@ -97,6 +107,8 @@ func OpDelete(key string, opts ...OpOption) Op {
panic("unexpected sort in delete") panic("unexpected sort in delete")
case ret.serializable: case ret.serializable:
panic("unexpected serializable in delete") panic("unexpected serializable in delete")
case ret.countOnly:
panic("unexpected countOnly in delete")
} }
return ret return ret
} }
@ -114,7 +126,9 @@ func OpPut(key, val string, opts ...OpOption) Op {
case ret.sort != nil: case ret.sort != nil:
panic("unexpected sort in put") panic("unexpected sort in put")
case ret.serializable: case ret.serializable:
panic("unexpected serializable in delete") panic("unexpected serializable in put")
case ret.countOnly:
panic("unexpected countOnly in delete")
} }
return ret return ret
} }
@ -131,6 +145,8 @@ func opWatch(key string, opts ...OpOption) Op {
panic("unexpected sort in watch") panic("unexpected sort in watch")
case ret.serializable: case ret.serializable:
panic("unexpected serializable in watch") panic("unexpected serializable in watch")
case ret.countOnly:
panic("unexpected countOnly in delete")
} }
return ret return ret
} }
@ -166,6 +182,12 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
} }
} }
// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
func GetPrefixRangeEnd(prefix string) string {
return string(getPrefix([]byte(prefix)))
}
func getPrefix(key []byte) []byte { func getPrefix(key []byte) []byte {
end := make([]byte, len(key)) end := make([]byte, len(key))
copy(end, key) copy(end, key)
@ -198,7 +220,7 @@ func WithRange(endKey string) OpOption {
} }
// WithFromKey specifies the range of 'Get' or 'Delete' requests // WithFromKey specifies the range of 'Get' or 'Delete' requests
// to be equal or greater than they key in the argument. // to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") } func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes 'Get' request serializable. By default, // WithSerializable makes 'Get' request serializable. By default,
@ -208,6 +230,17 @@ func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true } return func(op *Op) { op.serializable = true }
} }
// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
// values will be omitted.
func WithKeysOnly() OpOption {
return func(op *Op) { op.keysOnly = true }
}
// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
return func(op *Op) { op.countOnly = true }
}
// WithFirstCreate gets the key with the oldest creation revision in the request range. // WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,8 +19,10 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc"
) )
// Txn is the interface that wraps mini-transactions.
// //
// Tx.If( // Tx.If(
// Compare(Value(k1), ">", v1), // Compare(Value(k1), ">", v1),
@ -30,6 +32,7 @@ import (
// ).Else( // ).Else(
// OpPut(k4,v4), OpPut(k5,v5) // OpPut(k4,v4), OpPut(k5,v5)
// ).Commit() // ).Commit()
//
type Txn interface { type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed, // If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations // the operations passed into Then() will be executed. Or the operations
@ -63,8 +66,8 @@ type txn struct {
cmps []*pb.Compare cmps []*pb.Compare
sus []*pb.RequestUnion sus []*pb.RequestOp
fas []*pb.RequestUnion fas []*pb.RequestOp
} }
func (txn *txn) If(cs ...Cmp) Txn { func (txn *txn) If(cs ...Cmp) Txn {
@ -107,7 +110,7 @@ func (txn *txn) Then(ops ...Op) Txn {
for _, op := range ops { for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite() txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestUnion()) txn.sus = append(txn.sus, op.toRequestOp())
} }
return txn return txn
@ -125,7 +128,7 @@ func (txn *txn) Else(ops ...Op) Txn {
for _, op := range ops { for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite() txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestUnion()) txn.fas = append(txn.fas, op.toRequestOp())
} }
return txn return txn
@ -134,27 +137,25 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) { func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock() txn.mu.Lock()
defer txn.mu.Unlock() defer txn.mu.Unlock()
kv := txn.kv
for { for {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} resp, err := txn.commit()
resp, err := kv.getRemote().Txn(txn.ctx, r)
if err == nil { if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
resp, err := txn.kv.remote.Txn(txn.ctx, r, grpc.FailFast(false))
if err != nil {
return nil, err
}
return (*TxnResponse)(resp), nil return (*TxnResponse)(resp), nil
} }
if isHalted(txn.ctx, err) {
return nil, err
}
if txn.isWrite {
go kv.switchRemote(err)
return nil, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return nil, nerr
}
}
}
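
Editor's note: Commit now loops over the unexported commit helper, retrying read-only transactions on transient errors and surfacing halting errors through toErr, while write transactions return the error to the caller. The caller-facing interface is unchanged; a small hedged sketch of the If/Then/Else flow (keys and values are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Compare-and-swap: advance "job/state" only if it is still "pending".
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("job/state"), "=", "pending")).
		Then(clientv3.OpPut("job/state", "running")).
		Else(clientv3.OpGet("job/state")).
		Commit()
	if err != nil {
		// Write transactions are not retried by the loop above; the caller
		// decides whether resubmitting is safe.
		panic(err)
	}
	if resp.Succeeded {
		fmt.Println("state advanced to running")
	} else if rr := resp.Responses[0].GetResponseRange(); len(rr.Kvs) > 0 {
		fmt.Println("lost the race, state is", string(rr.Kvs[0].Value))
	}
}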

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,20 +17,23 @@ package clientv3
import ( import (
"fmt" "fmt"
"sync" "sync"
"time"
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
storagepb "github.com/coreos/etcd/storage/storagepb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
const ( const (
EventTypeDelete = storagepb.DELETE EventTypeDelete = mvccpb.DELETE
EventTypePut = storagepb.PUT EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
) )
type Event storagepb.Event type Event mvccpb.Event
type WatchChan <-chan WatchResponse type WatchChan <-chan WatchResponse
@ -39,7 +42,7 @@ type Watcher interface {
// through the returned channel. // through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request // If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed. // might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WitchPrefix'. // 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests. // Close closes the watcher and cancels all watch requests.
@ -57,6 +60,8 @@ type WatchResponse struct {
// If the watch failed and the stream was about to close, before the channel is closed, // If the watch failed and the stream was about to close, before the channel is closed,
// the channel sends a final response that has Canceled set to true with a non-nil Err(). // the channel sends a final response that has Canceled set to true with a non-nil Err().
Canceled bool Canceled bool
closeErr error
} }
// IsCreate returns true if the event tells that the key is newly created. // IsCreate returns true if the event tells that the key is newly created.
@ -71,10 +76,12 @@ func (e *Event) IsModify() bool {
// Err is the error value if this WatchResponse holds an error. // Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error { func (wr *WatchResponse) Err() error {
if wr.CompactRevision != 0 { switch {
case wr.closeErr != nil:
return v3rpc.Error(wr.closeErr)
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted return v3rpc.ErrCompacted
} case wr.Canceled:
if wr.Canceled {
return v3rpc.ErrFutureRev return v3rpc.ErrFutureRev
} }
return nil return nil
@ -87,18 +94,28 @@ func (wr *WatchResponse) IsProgressNotify() bool {
// watcher implements the Watcher interface // watcher implements the Watcher interface
type watcher struct { type watcher struct {
c *Client remote pb.WatchClient
conn *grpc.ClientConn
// mu protects the grpc streams map
mu sync.RWMutex
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
}
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient remote pb.WatchClient
// ctx controls internal remote.Watch requests // ctx controls internal remote.Watch requests
ctx context.Context ctx context.Context
// ctxKey is the key used when looking up this stream's context
ctxKey string
cancel context.CancelFunc cancel context.CancelFunc
// streams holds all active watchers
streams map[int64]*watcherStream
// mu protects the streams map // mu protects the streams map
mu sync.RWMutex mu sync.RWMutex
// streams holds all active watchers
streams map[int64]*watcherStream
// reqc sends a watch request from Watch() to the main goroutine // reqc sends a watch request from Watch() to the main goroutine
reqc chan *watchRequest reqc chan *watchRequest
@ -108,8 +125,11 @@ type watcher struct {
stopc chan struct{} stopc chan struct{}
// donec closes to broadcast shutdown // donec closes to broadcast shutdown
donec chan struct{} donec chan struct{}
// errc transmits errors from grpc Recv // errc transmits errors from grpc Recv to the watch stream reconn logic
errc chan error errc chan error
// the error that closed the watch stream
closeErr error
} }
// watchRequest is issued by the subscriber to start a new watcher // watchRequest is issued by the subscriber to start a new watcher
@ -126,6 +146,7 @@ type watchRequest struct {
// watcherStream represents a registered watcher // watcherStream represents a registered watcher
type watcherStream struct { type watcherStream struct {
// initReq is the request that initiated this request
initReq watchRequest initReq watchRequest
// outc publishes watch responses to subscriber // outc publishes watch responses to subscriber
@ -141,15 +162,30 @@ type watcherStream struct {
} }
func NewWatcher(c *Client) Watcher { func NewWatcher(c *Client) Watcher {
ctx, cancel := context.WithCancel(context.Background()) return &watcher{
conn := c.ActiveConnection() remote: pb.NewWatchClient(c.conn),
streams: make(map[string]*watchGrpcStream),
}
}
w := &watcher{ // never closes
c: c, var valCtxCh = make(chan struct{})
conn: conn, var zeroTime = time.Unix(0, 0)
remote: pb.NewWatchClient(conn),
// ctx with only the values; never Done
type valCtx struct{ context.Context }
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
ctx: ctx, ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx),
cancel: cancel, cancel: cancel,
streams: make(map[int64]*watcherStream), streams: make(map[int64]*watcherStream),
@ -159,8 +195,8 @@ func NewWatcher(c *Client) Watcher {
donec: make(chan struct{}), donec: make(chan struct{}),
errc: make(chan error, 1), errc: make(chan error, 1),
} }
go w.run() go wgs.run()
return w return wgs
} }
// Watch posts a watch request to run() and waits for a new watcher channel // Watch posts a watch request to run() and waits for a new watcher channel
@ -178,13 +214,41 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
} }
ok := false ok := false
ctxKey := fmt.Sprintf("%v", ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
if w.streams == nil {
// closed
w.mu.Unlock()
ch := make(chan WatchResponse)
close(ch)
return ch
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
// couldn't create channel; return closed channel
closeCh := make(chan WatchResponse, 1)
// submit request // submit request
select { select {
case w.reqc <- wr: case reqc <- wr:
ok = true ok = true
case <-wr.ctx.Done(): case <-wr.ctx.Done():
case <-w.donec: case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
} }
// receive channel // receive channel
@ -193,26 +257,44 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
case ret := <-retc: case ret := <-retc:
return ret return ret
case <-ctx.Done(): case <-ctx.Done():
case <-w.donec: case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
} }
} }
// couldn't create channel; return closed channel close(closeCh)
ch := make(chan WatchResponse) return closeCh
close(ch)
return ch
} }
func (w *watcher) Close() error { func (w *watcher) Close() (err error) {
select { w.mu.Lock()
case w.stopc <- struct{}{}: streams := w.streams
case <-w.donec: w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
err = werr
} }
}
return err
}
func (w *watchGrpcStream) Close() (err error) {
close(w.stopc)
<-w.donec <-w.donec
return <-w.errc select {
case err = <-w.errc:
default:
}
return toErr(w.ctx, err)
} }
func (w *watcher) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) { func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
if pendingReq == nil { if pendingReq == nil {
// no pending request; ignore // no pending request; ignore
return return
@ -265,27 +347,32 @@ func (w *watcher) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
} }
// closeStream closes the watcher resources and removes it // closeStream closes the watcher resources and removes it
func (w *watcher) closeStream(ws *watcherStream) { func (w *watchGrpcStream) closeStream(ws *watcherStream) {
// cancels request stream; subscriber receives nil channel // cancels request stream; subscriber receives nil channel
close(ws.initReq.retc) close(ws.initReq.retc)
// close subscriber's channel // close subscriber's channel
close(ws.outc) close(ws.outc)
// shutdown serveStream
close(ws.recvc)
delete(w.streams, ws.id) delete(w.streams, ws.id)
} }
// run is the root of the goroutines for managing a watcher client // run is the root of the goroutines for managing a watcher client
func (w *watcher) run() { func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
defer func() { defer func() {
w.owner.mu.Lock()
w.closeErr = closeErr
if w.owner.streams != nil {
delete(w.owner.streams, w.ctxKey)
}
close(w.donec) close(w.donec)
w.owner.mu.Unlock()
w.cancel() w.cancel()
}() }()
// start a stream with the etcd grpc server // start a stream with the etcd grpc server
wc, wcerr := w.newWatchClient() if wc, closeErr = w.newWatchClient(); closeErr != nil {
if wcerr != nil {
w.errc <- wcerr
return return
} }
@ -314,6 +401,18 @@ func (w *watcher) run() {
curReqC = w.reqc curReqC = w.reqc
case pbresp.Canceled: case pbresp.Canceled:
delete(cancelSet, pbresp.WatchId) delete(cancelSet, pbresp.WatchId)
// shutdown serveStream, if any
w.mu.Lock()
if ws, ok := w.streams[pbresp.WatchId]; ok {
close(ws.recvc)
delete(w.streams, ws.id)
}
numStreams := len(w.streams)
w.mu.Unlock()
if numStreams == 0 {
// don't leak watcher streams
return
}
default: default:
// dispatch to appropriate watch stream // dispatch to appropriate watch stream
if ok := w.dispatchEvent(pbresp); ok { if ok := w.dispatchEvent(pbresp); ok {
@ -334,9 +433,12 @@ func (w *watcher) run() {
} }
// watch client failed to recv; spawn another if possible // watch client failed to recv; spawn another if possible
// TODO report watch client errors from errc? // TODO report watch client errors from errc?
case <-w.errc: case err := <-w.errc:
if wc, wcerr = w.newWatchClient(); wcerr != nil { if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
w.errc <- wcerr closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return return
} }
curReqC = w.reqc curReqC = w.reqc
@ -345,7 +447,6 @@ func (w *watcher) run() {
} }
cancelSet = make(map[int64]struct{}) cancelSet = make(map[int64]struct{})
case <-w.stopc: case <-w.stopc:
w.errc <- nil
return return
} }
@ -365,7 +466,7 @@ func (w *watcher) run() {
} }
// dispatchEvent sends a WatchResponse to the appropriate watcher stream // dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watcher) dispatchEvent(pbresp *pb.WatchResponse) bool { func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
w.mu.RLock() w.mu.RLock()
defer w.mu.RUnlock() defer w.mu.RUnlock()
ws, ok := w.streams[pbresp.WatchId] ws, ok := w.streams[pbresp.WatchId]
@ -385,7 +486,7 @@ func (w *watcher) dispatchEvent(pbresp *pb.WatchResponse) bool {
} }
// serveWatchClient forwards messages from the grpc stream to run() // serveWatchClient forwards messages from the grpc stream to run()
func (w *watcher) serveWatchClient(wc pb.Watch_WatchClient) { func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
for { for {
resp, err := wc.Recv() resp, err := wc.Recv()
if err != nil { if err != nil {
@ -404,7 +505,8 @@ func (w *watcher) serveWatchClient(wc pb.Watch_WatchClient) {
} }
// serveStream forwards watch responses from run() to the subscriber // serveStream forwards watch responses from run() to the subscriber
func (w *watcher) serveStream(ws *watcherStream) { func (w *watchGrpcStream) serveStream(ws *watcherStream) {
var closeErr error
emptyWr := &WatchResponse{} emptyWr := &WatchResponse{}
wrs := []*WatchResponse{} wrs := []*WatchResponse{}
resuming := false resuming := false
@ -440,7 +542,7 @@ func (w *watcher) serveStream(ws *watcherStream) {
return return
} }
// resume up to last seen event if disconnected // resume up to last seen event if disconnected
if resuming { if resuming && wr.Err() == nil {
resuming = false resuming = false
// trim events already seen // trim events already seen
for i := 0; i < len(wr.Events); i++ { for i := 0; i < len(wr.Events); i++ {
@ -454,6 +556,7 @@ func (w *watcher) serveStream(ws *watcherStream) {
break break
} }
} }
resuming = false
// TODO don't keep buffering if subscriber stops reading // TODO don't keep buffering if subscriber stops reading
wrs = append(wrs, wr) wrs = append(wrs, wr)
case resumeRev := <-ws.resumec: case resumeRev := <-ws.resumec:
@ -468,17 +571,28 @@ func (w *watcher) serveStream(ws *watcherStream) {
} }
case <-w.donec: case <-w.donec:
closing = true closing = true
closeErr = w.closeErr
case <-ws.initReq.ctx.Done(): case <-ws.initReq.ctx.Done():
closing = true closing = true
} }
} }
// try to send off close error
if closeErr != nil {
select {
case ws.outc <- WatchResponse{closeErr: w.closeErr}:
case <-w.donec:
case <-time.After(closeSendErrTimeout):
}
}
w.mu.Lock() w.mu.Lock()
w.closeStream(ws) w.closeStream(ws)
w.mu.Unlock() w.mu.Unlock()
// lazily send cancel message if events on missing id // lazily send cancel message if events on missing id
} }
func (w *watcher) newWatchClient() (pb.Watch_WatchClient, error) { func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
ws, rerr := w.resume() ws, rerr := w.resume()
if rerr != nil { if rerr != nil {
return nil, rerr return nil, rerr
@ -488,7 +602,7 @@ func (w *watcher) newWatchClient() (pb.Watch_WatchClient, error) {
} }
// resume creates a new WatchClient with all current watchers reestablished // resume creates a new WatchClient with all current watchers reestablished
func (w *watcher) resume() (ws pb.Watch_WatchClient, err error) { func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) {
for { for {
if ws, err = w.openWatchClient(); err != nil { if ws, err = w.openWatchClient(); err != nil {
break break
@ -496,31 +610,34 @@ func (w *watcher) resume() (ws pb.Watch_WatchClient, err error) {
break break
} }
} }
return ws, err return ws, v3rpc.Error(err)
} }
// openWatchClient retries opening a watchclient until retryConnection fails // openWatchClient retries opening a watchclient until retryConnection fails
func (w *watcher) openWatchClient() (ws pb.Watch_WatchClient, err error) { func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for { for {
if ws, err = w.remote.Watch(w.ctx); ws != nil { select {
break case <-w.stopc:
} else if isHalted(w.ctx, err) { if err == nil {
err = context.Canceled
}
return nil, err return nil, err
default:
} }
newConn, nerr := w.c.retryConnection(w.conn, nil) if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
if nerr != nil { break
return nil, nerr }
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
} }
w.conn = newConn
w.remote = pb.NewWatchClient(w.conn)
} }
return ws, nil return ws, nil
} }
// resumeWatchers rebuilds every registered watcher on a new client // resumeWatchers rebuilds every registered watcher on a new client
func (w *watcher) resumeWatchers(wc pb.Watch_WatchClient) error { func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
streams := []*watcherStream{}
w.mu.RLock() w.mu.RLock()
streams := make([]*watcherStream, 0, len(w.streams))
for _, ws := range w.streams { for _, ws := range w.streams {
streams = append(streams, ws) streams = append(streams, ws)
} }
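
Editor's note: with watch streams now multiplexed per context through watchGrpcStream, close errors such as no-leader and compaction are reported via WatchResponse.Err() before the channel closes. A hedged consumer sketch (the prefix and recovery strategy are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// All watches opened with the same ctx share one watchGrpcStream.
	wch := cli.Watch(ctx, "jobs/", clientv3.WithPrefix())
	for wresp := range wch {
		if err := wresp.Err(); err != nil {
			if err == rpctypes.ErrCompacted {
				// Requested revision was compacted away; restart from a live revision.
				fmt.Println("watch compacted at", wresp.CompactRevision)
				return
			}
			// Other close errors (e.g. lost leader) also arrive here.
			fmt.Println("watch closed:", err)
			return
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}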

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,7 +19,7 @@ import (
"time" "time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage" "github.com/coreos/etcd/mvcc"
"github.com/coreos/pkg/capnslog" "github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork" "github.com/jonboulle/clockwork"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -96,7 +96,7 @@ func (t *Periodic) Run() {
plog.Noticef("Starting auto-compaction at revision %d", rev) plog.Noticef("Starting auto-compaction at revision %d", rev)
_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
if err == nil || err == storage.ErrCompacted { if err == nil || err == mvcc.ErrCompacted {
t.revs = make([]int64, 0) t.revs = make([]int64, 0)
last = clock.Now() last = clock.Now()
plog.Noticef("Finished auto-compaction at revision %d", rev) plog.Noticef("Finished auto-compaction at revision %d", rev)

16
vendor/github.com/coreos/etcd/compactor/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compactor implements automated policies for compacting etcd's mvcc storage.
package compactor

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -20,7 +20,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"math" "math"
"net"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -30,6 +29,7 @@ import (
"time" "time"
"github.com/coreos/etcd/client" "github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog" "github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork" "github.com/jonboulle/clockwork"
@ -124,16 +124,15 @@ func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
cfg := client.Config{
Transport: &http.Transport{
Proxy: pf,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
}, tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
if err != nil {
return nil, err
}
tr.Proxy = pf
cfg := client.Config{
Transport: tr,
Endpoints: []string{u.String()}, Endpoints: []string{u.String()},
} }
c, err := client.New(cfg) c, err := client.New(cfg)
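
Editor's note: the discovery client now builds its HTTP transport through pkg/transport instead of hand-rolling a net.Dialer. A rough sketch of the same construction in isolation (the discovery URL and the no-op proxy are placeholders):

package main

import (
	"net/http"
	"net/url"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// Plain (non-TLS) transport with a 30s dial timeout, as in the diff.
	tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
	if err != nil {
		panic(err)
	}
	// Optional proxy function, mirroring the pf value passed in newDiscovery.
	tr.Proxy = func(req *http.Request) (*url.URL, error) {
		return nil, nil // no proxy
	}

	cfg := client.Config{
		Transport: tr,
		Endpoints: []string{"https://discovery.etcd.io"},
	}
	c, err := client.New(cfg)
	if err != nil {
		panic(err)
	}
	kapi := client.NewKeysAPI(c)
	_ = kapi // the discovery code fetches the cluster configuration through this API
}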

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
// Package error describes errors in etcd project. When any change happens, // Package error describes errors in etcd project. When any change happens,
// Documentation/errorcode.md needs to be updated correspondingly. // Documentation/v2/errorcode.md needs to be updated correspondingly.
package error package error
import ( import (

View File

@ -0,0 +1,85 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"sync"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
)
type Capability string
const (
AuthCapability Capability = "auth"
V3rpcCapability Capability = "v3rpc"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "api")
// capabilityMaps is a static map of version to capability map.
// the base capabilities is the set of capability 2.0 supports.
capabilityMaps = map[string]map[Capability]bool{
"2.1.0": {AuthCapability: true},
"2.2.0": {AuthCapability: true},
"2.3.0": {AuthCapability: true},
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
}
enableMapMu sync.RWMutex
// enabledMap points to a map in capabilityMaps
enabledMap map[Capability]bool
curVersion *semver.Version
)
func init() {
enabledMap = make(map[Capability]bool)
}
// UpdateCapability updates the enabledMap when the cluster version increases.
func UpdateCapability(v *semver.Version) {
if v == nil {
// if recovered but version was never set by cluster
return
}
enableMapMu.Lock()
if curVersion != nil && !curVersion.LessThan(*v) {
enableMapMu.Unlock()
return
}
curVersion = v
enabledMap = capabilityMaps[curVersion.String()]
enableMapMu.Unlock()
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
}
func IsCapabilityEnabled(c Capability) bool {
enableMapMu.RLock()
defer enableMapMu.RUnlock()
if enabledMap == nil {
return false
}
return enabledMap[c]
}
func EnableCapability(c Capability) {
enableMapMu.Lock()
defer enableMapMu.Unlock()
enabledMap[c] = true
}
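
Editor's note: the capability registry moves from v2http into etcdserver/api so both the v2 HTTP layer and the v3 gRPC layer can consult it. A small sketch of how the exported functions compose; the version strings below only exercise the static map, whereas inside etcd UpdateCapability is driven by the published cluster version:

package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api"
	"github.com/coreos/go-semver/semver"
)

func main() {
	// Before a cluster version is published, nothing is enabled.
	fmt.Println("v3rpc:", api.IsCapabilityEnabled(api.V3rpcCapability)) // false

	// Once the cluster reports 3.0.0, the capabilityMaps entry enables both
	// auth and the v3 RPC surface.
	api.UpdateCapability(semver.New("3.0.0"))
	fmt.Println("auth:", api.IsCapabilityEnabled(api.AuthCapability))   // true
	fmt.Println("v3rpc:", api.IsCapabilityEnabled(api.V3rpcCapability)) // true

	// UpdateCapability ignores versions older than the current one.
	api.UpdateCapability(semver.New("2.3.0"))
	fmt.Println("still v3rpc:", api.IsCapabilityEnabled(api.V3rpcCapability)) // true
}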

View File

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

16
vendor/github.com/coreos/etcd/etcdserver/api/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
package api

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,75 +17,14 @@ package v2http
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"sync"
"time"
"github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/go-semver/semver"
) )
type capability string func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
const (
authCapability capability = "auth"
)
var (
// capabilityMaps is a static map of version to capability map.
// the base capabilities is the set of capability 2.0 supports.
capabilityMaps = map[string]map[capability]bool{
"2.1.0": {authCapability: true},
"2.2.0": {authCapability: true},
"2.3.0": {authCapability: true},
}
enableMapMu sync.Mutex
// enabledMap points to a map in capabilityMaps
enabledMap map[capability]bool
)
// capabilityLoop checks the cluster version every 500ms and updates
// the enabledMap when the cluster version increased.
// capabilityLoop MUST be ran in a goroutine before checking capability
// or using capabilityHandler.
func capabilityLoop(s *etcdserver.EtcdServer) {
stopped := s.StopNotify()
var pv *semver.Version
for {
if v := s.ClusterVersion(); v != pv {
if pv == nil {
pv = v
} else if v != nil && pv.LessThan(*v) {
pv = v
}
enableMapMu.Lock()
enabledMap = capabilityMaps[pv.String()]
enableMapMu.Unlock()
plog.Infof("enabled capabilities for version %s", pv)
}
select {
case <-stopped:
return
case <-time.After(500 * time.Millisecond):
}
}
}
func isCapabilityEnabled(c capability) bool {
enableMapMu.Lock()
defer enableMapMu.Unlock()
if enabledMap == nil {
return false
}
return enabledMap[c]
}
func capabilityHandler(c capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
if !isCapabilityEnabled(c) { if !api.IsCapabilityEnabled(c) {
notCapable(w, r, c) notCapable(w, r, c)
return return
} }
@ -93,7 +32,7 @@ func capabilityHandler(c capability, fn func(http.ResponseWriter, *http.Request)
} }
} }
func notCapable(w http.ResponseWriter, r *http.Request, c capability) { func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c)) herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
if err := herr.WriteTo(w); err != nil { if err := herr.WriteTo(w); err != nil {
plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -62,8 +62,6 @@ const (
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
go capabilityLoop(server)
sec := auth.NewStore(server, timeout) sec := auth.NewStore(server, timeout)
kh := &keysHandler{ kh := &keysHandler{
@ -72,6 +70,7 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
cluster: server.Cluster(), cluster: server.Cluster(),
timer: server, timer: server,
timeout: timeout, timeout: timeout,
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
} }
sh := &statsHandler{ sh := &statsHandler{
@ -84,6 +83,7 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
cluster: server.Cluster(), cluster: server.Cluster(),
timeout: timeout, timeout: timeout,
clock: clockwork.NewRealClock(), clock: clockwork.NewRealClock(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
} }
dmh := &deprecatedMachinesHandler{ dmh := &deprecatedMachinesHandler{
@ -93,6 +93,7 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
sech := &authHandler{ sech := &authHandler{
sec: sec, sec: sec,
cluster: server.Cluster(), cluster: server.Cluster(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
} }
mux := http.NewServeMux() mux := http.NewServeMux()
@ -138,6 +139,7 @@ type keysHandler struct {
cluster api.Cluster cluster api.Cluster
timer etcdserver.RaftTimer timer etcdserver.RaftTimer
timeout time.Duration timeout time.Duration
clientCertAuthEnabled bool
} }
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -157,7 +159,7 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
// The path must be valid at this point (we've parsed the request successfully). // The path must be valid at this point (we've parsed the request successfully).
if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive) { if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
writeKeyNoAuth(w) writeKeyNoAuth(w)
return return
} }
@ -205,13 +207,14 @@ type membersHandler struct {
cluster api.Cluster cluster api.Cluster
timeout time.Duration timeout time.Duration
clock clockwork.Clock clock clockwork.Clock
clientCertAuthEnabled bool
} }
func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") { if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
return return
} }
if !hasWriteRootAccess(h.sec, r) { if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
@ -720,20 +723,19 @@ func trimEventPrefix(ev *store.Event, prefix string) *store.Event {
// Since the *Event may reference one in the store history // Since the *Event may reference one in the store history
// history, we must copy it before modifying // history, we must copy it before modifying
e := ev.Clone() e := ev.Clone()
e.Node = trimNodeExternPrefix(e.Node, prefix) trimNodeExternPrefix(e.Node, prefix)
e.PrevNode = trimNodeExternPrefix(e.PrevNode, prefix) trimNodeExternPrefix(e.PrevNode, prefix)
return e return e
} }
func trimNodeExternPrefix(n *store.NodeExtern, prefix string) *store.NodeExtern { func trimNodeExternPrefix(n *store.NodeExtern, prefix string) {
if n == nil { if n == nil {
return nil return
} }
n.Key = strings.TrimPrefix(n.Key, prefix) n.Key = strings.TrimPrefix(n.Key, prefix)
for _, nn := range n.Nodes { for _, nn := range n.Nodes {
nn = trimNodeExternPrefix(nn, prefix) trimNodeExternPrefix(nn, prefix)
} }
return n
} }
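
Editor's note: the old trimNodeExternPrefix both mutated its argument and returned it, so assigning the result to the range variable nn was redundant; since Nodes holds pointers, the rewrite drops the return value and mutates in place. A tiny self-contained sketch of the same pattern on a simplified node type (the Node struct is a stand-in, not store.NodeExtern):

package main

import (
	"fmt"
	"strings"
)

// Node is a minimal stand-in for store.NodeExtern: a key plus child pointers.
type Node struct {
	Key   string
	Nodes []*Node
}

// trimPrefix mutates the tree in place, like the rewritten trimNodeExternPrefix;
// no return value is needed because every child is reached through a pointer.
func trimPrefix(n *Node, prefix string) {
	if n == nil {
		return
	}
	n.Key = strings.TrimPrefix(n.Key, prefix)
	for _, nn := range n.Nodes {
		trimPrefix(nn, prefix)
	}
}

func main() {
	root := &Node{Key: "/internal/dir", Nodes: []*Node{{Key: "/internal/dir/a"}, {Key: "/internal/dir/b"}}}
	trimPrefix(root, "/internal")
	fmt.Println(root.Key, root.Nodes[0].Key, root.Nodes[1].Key) // /dir /dir/a /dir/b
}
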
func trimErrorPrefix(err error, prefix string) error { func trimErrorPrefix(err error, prefix string) error {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -28,16 +28,54 @@ import (
type authHandler struct { type authHandler struct {
sec auth.Store sec auth.Store
cluster api.Cluster cluster api.Cluster
clientCertAuthEnabled bool
} }
func hasWriteRootAccess(sec auth.Store, r *http.Request) bool { func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
if r.Method == "GET" || r.Method == "HEAD" { if r.Method == "GET" || r.Method == "HEAD" {
return true return true
} }
return hasRootAccess(sec, r) return hasRootAccess(sec, r, clientCertAuthEnabled)
} }
func hasRootAccess(sec auth.Store, r *http.Request) bool { func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User {
username, password, ok := r.BasicAuth()
if !ok {
plog.Warningf("auth: malformed basic auth encoding")
return nil
}
user, err := sec.GetUser(username)
if err != nil {
return nil
}
ok = sec.CheckPassword(user, password)
if !ok {
plog.Warningf("auth: incorrect password for user: %s", username)
return nil
}
return &user
}
func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User {
if r.TLS == nil {
return nil
}
for _, chains := range r.TLS.VerifiedChains {
for _, chain := range chains {
plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
user, err := sec.GetUser(chain.Subject.CommonName)
if err == nil {
plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
return &user
}
}
}
return nil
}
func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
if sec == nil { if sec == nil {
// No store means no auth available, eg, tests. // No store means no auth available, eg, tests.
return true return true
@ -45,30 +83,30 @@ func hasRootAccess(sec auth.Store, r *http.Request) bool {
if !sec.AuthEnabled() { if !sec.AuthEnabled() {
return true return true
} }
username, password, ok := r.BasicAuth()
if !ok { var rootUser *auth.User
if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
rootUser = userFromClientCertificate(sec, r)
if rootUser == nil {
return false return false
} }
rootUser, err := sec.GetUser(username) } else {
if err != nil { rootUser = userFromBasicAuth(sec, r)
if rootUser == nil {
return false return false
} }
}
ok = sec.CheckPassword(rootUser, password)
if !ok {
plog.Warningf("auth: wrong password for user %s", username)
return false
}
for _, role := range rootUser.Roles { for _, role := range rootUser.Roles {
if role == auth.RootRoleName { if role == auth.RootRoleName {
return true return true
} }
} }
plog.Warningf("auth: user %s does not have the %s role for resource %s.", username, auth.RootRoleName, r.URL.Path) plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path)
return false return false
} }
func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive bool) bool { func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
if sec == nil { if sec == nil {
// No store means no auth available, eg, tests. // No store means no auth available, eg, tests.
return true return true
@ -76,25 +114,21 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
if !sec.AuthEnabled() { if !sec.AuthEnabled() {
return true return true
} }
if r.Header.Get("Authorization") == "" {
var user *auth.User
if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
user = userFromClientCertificate(sec, r)
if user == nil {
plog.Warningf("auth: no authorization provided, checking guest access") plog.Warningf("auth: no authorization provided, checking guest access")
return hasGuestAccess(sec, r, key) return hasGuestAccess(sec, r, key)
} }
username, password, ok := r.BasicAuth() } else {
if !ok { user = userFromBasicAuth(sec, r)
plog.Warningf("auth: malformed basic auth encoding") if user == nil {
return false return false
} }
user, err := sec.GetUser(username)
if err != nil {
plog.Warningf("auth: no such user: %s.", username)
return false
}
authAsUser := sec.CheckPassword(user, password)
if !authAsUser {
plog.Warningf("auth: incorrect password for user: %s.", username)
return false
} }
writeAccess := r.Method != "GET" && r.Method != "HEAD" writeAccess := r.Method != "GET" && r.Method != "HEAD"
for _, roleName := range user.Roles { for _, roleName := range user.Roles {
role, err := sec.GetRole(roleName) role, err := sec.GetRole(roleName)
@ -109,7 +143,7 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive b
return true return true
} }
} }
plog.Warningf("auth: invalid access for user %s on key %s.", username, key) plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
return false return false
} }
@ -134,18 +168,18 @@ func writeNoAuth(w http.ResponseWriter, r *http.Request) {
} }
func handleAuth(mux *http.ServeMux, sh *authHandler) { func handleAuth(mux *http.ServeMux, sh *authHandler) {
mux.HandleFunc(authPrefix+"/roles", capabilityHandler(authCapability, sh.baseRoles)) mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles))
mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(authCapability, sh.handleRoles)) mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles))
mux.HandleFunc(authPrefix+"/users", capabilityHandler(authCapability, sh.baseUsers)) mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers))
mux.HandleFunc(authPrefix+"/users/", capabilityHandler(authCapability, sh.handleUsers)) mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers))
mux.HandleFunc(authPrefix+"/enable", capabilityHandler(authCapability, sh.enableDisable)) mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable))
} }
func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") { if !allowMethod(w, r.Method, "GET") {
return return
} }
if !hasRootAccess(sh.sec, r) { if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
@ -209,7 +243,7 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return return
} }
if !hasRootAccess(sh.sec, r) { if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
@ -285,11 +319,15 @@ type userWithRoles struct {
Roles []auth.Role `json:"roles,omitempty"` Roles []auth.Role `json:"roles,omitempty"`
} }
type usersCollections struct {
Users []userWithRoles `json:"users"`
}
func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") { if !allowMethod(w, r.Method, "GET") {
return return
} }
if !hasRootAccess(sh.sec, r) { if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
@ -311,9 +349,7 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
return return
} }
var usersCollections struct { ucs := usersCollections{}
Users []userWithRoles `json:"users"`
}
for _, userName := range users { for _, userName := range users {
var user auth.User var user auth.User
user, err = sh.sec.GetUser(userName) user, err = sh.sec.GetUser(userName)
@ -327,15 +363,14 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
var role auth.Role var role auth.Role
role, err = sh.sec.GetRole(roleName) role, err = sh.sec.GetRole(roleName)
if err != nil { if err != nil {
writeError(w, r, err) continue
return
} }
uwr.Roles = append(uwr.Roles, role) uwr.Roles = append(uwr.Roles, role)
} }
usersCollections.Users = append(usersCollections.Users, uwr) ucs.Users = append(ucs.Users, uwr)
} }
err = json.NewEncoder(w).Encode(usersCollections) err = json.NewEncoder(w).Encode(ucs)
if err != nil { if err != nil {
plog.Warningf("baseUsers error encoding on %s", r.URL) plog.Warningf("baseUsers error encoding on %s", r.URL)
@ -364,7 +399,7 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return return
} }
if !hasRootAccess(sh.sec, r) { if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
@ -477,7 +512,7 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
return return
} }
if !hasWriteRootAccess(sh.sec, r) { if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
writeNoAuth(w, r) writeNoAuth(w, r)
return return
} }
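
Editor's note: with client-cert auth enabled, the v2 HTTP handlers can now resolve the user from the verified client certificate's common name when no Authorization header is present. A stripped-down sketch of the same lookup against stdlib TLS state (the map-based user store is a toy replacement for auth.Store):

package main

import (
	"fmt"
	"net/http"
)

// users is a toy stand-in for auth.Store; etcd resolves the common name with
// sec.GetUser instead.
var users = map[string]bool{"root": true}

// userFromCert mirrors userFromClientCertificate: walk the verified chains
// and return the first common name known to the store.
func userFromCert(r *http.Request) (string, bool) {
	if r.TLS == nil {
		return "", false
	}
	for _, chain := range r.TLS.VerifiedChains {
		for _, cert := range chain {
			if users[cert.Subject.CommonName] {
				return cert.Subject.CommonName, true
			}
		}
	}
	return "", false
}

func handler(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Authorization") != "" {
		fmt.Fprintln(w, "would fall back to basic auth")
		return
	}
	if name, ok := userFromCert(r); ok {
		fmt.Fprintf(w, "authenticated %s by cert common name\n", name)
		return
	}
	http.Error(w, "insufficient credentials", http.StatusUnauthorized)
}

func main() {
	// For VerifiedChains to be populated, the server must request and verify
	// client certificates (tls.Config{ClientAuth: tls.RequireAndVerifyClientCert, ...}).
	http.HandleFunc("/v2/keys/", handler)
	_ = http.ListenAndServe(":8080", nil) // plain HTTP here; TLS setup omitted
}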

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -48,7 +48,7 @@ var (
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Namespace: "etcd", Namespace: "etcd",
Subsystem: "http", Subsystem: "http",
Name: "successful_duration_second", Name: "successful_duration_seconds",
Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"method"}) }, []string{"method"})
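
Editor's note: besides the license header, the change here corrects the metric name to the plural successful_duration_seconds, matching the Prometheus convention of unit-suffixed, pluralized names for seconds-valued histograms. A hedged sketch of how such a histogram is registered and observed with client_golang (the namespace and handler timing are illustrative, not etcd's):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var successfulDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "demo",
		Subsystem: "http",
		Name:      "successful_duration_seconds", // unit-suffixed, plural
		Help:      "Bucketed histogram of processing time (s) of successfully handled requests.",
		Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
	}, []string{"method"})

func init() {
	prometheus.MustRegister(successfulDuration)
}

func timed(fn http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		fn(w, r)
		// Observe in seconds so the _seconds suffix stays truthful.
		successfulDuration.WithLabelValues(r.Method).Observe(time.Since(start).Seconds())
	}
}

func main() {
	http.HandleFunc("/", timed(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	_ = http.ListenAndServe(":8080", nil) // illustrative address
}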

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -37,13 +37,19 @@ func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (
} }
func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.AuthDisable(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.Authenticate(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
@ -55,23 +61,43 @@ func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*p
} }
func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.RoleDelete(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.RoleGet(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) RoleRevoke(ctx context.Context, r *pb.AuthRoleRevokeRequest) (*pb.AuthRoleRevokeResponse, error) { func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.RoleList(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) RoleGrant(ctx context.Context, r *pb.AuthRoleGrantRequest) (*pb.AuthRoleGrantResponse, error) { func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.RoleRevokePermission(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
resp, err := as.authenticator.RoleGrantPermission(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
@ -91,18 +117,35 @@ func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteReques
} }
func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.UserGet(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) UserGrant(ctx context.Context, r *pb.AuthUserGrantRequest) (*pb.AuthUserGrantResponse, error) { func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.UserList(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) UserRevoke(ctx context.Context, r *pb.AuthUserRevokeRequest) (*pb.AuthUserRevokeResponse, error) { func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
plog.Info("not implemented yet") resp, err := as.authenticator.UserGrantRole(ctx, r)
return nil, nil if err != nil {
return nil, togRPCError(err)
}
return resp, nil
}
func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
resp, err := as.authenticator.UserRevokeRole(ctx, r)
if err != nil {
return nil, togRPCError(err)
}
return resp, nil
} }
func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {

@ -0,0 +1,34 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import "github.com/gogo/protobuf/proto"
type codec struct{}
func (c *codec) Marshal(v interface{}) ([]byte, error) {
b, err := proto.Marshal(v.(proto.Message))
sentBytes.Add(float64(len(b)))
return b, err
}
func (c *codec) Unmarshal(data []byte, v interface{}) error {
receivedBytes.Add(float64(len(data)))
return proto.Unmarshal(data, v.(proto.Message))
}
func (c *codec) String() string {
return "proto"
}

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,15 +19,24 @@ import (
"github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/pkg/capnslog"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
) )
func init() {
grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
}
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
var opts []grpc.ServerOption var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil { if tls != nil {
opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
} }
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
grpcServer := grpc.NewServer(opts...) grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
@ -36,5 +45,6 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
return grpcServer return grpcServer
} }

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -40,4 +40,7 @@ func (h *header) fill(rh *pb.ResponseHeader) {
rh.ClusterId = uint64(h.clusterID) rh.ClusterId = uint64(h.clusterID)
rh.MemberId = uint64(h.memberID) rh.MemberId = uint64(h.memberID)
rh.RaftTerm = h.raftTimer.Term() rh.RaftTerm = h.raftTimer.Term()
if rh.Revision == 0 {
rh.Revision = h.rev()
}
} }

@ -0,0 +1,176 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
maxNoLeaderCnt = 3
)
type streamsMap struct {
mu sync.Mutex
streams map[grpc.ServerStream]struct{}
}
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return nil, rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ctx)
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return nil, rpctypes.ErrGRPCNoLeader
}
}
}
return metricsUnaryInterceptor(ctx, req, info, handler)
}
}
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
smap := monitorLeader(s)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
return rpctypes.ErrGRPCNotCapable
}
md, ok := metadata.FromContext(ss.Context())
if ok {
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return rpctypes.ErrGRPCNoLeader
}
cctx, cancel := context.WithCancel(ss.Context())
ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
smap.mu.Lock()
smap.streams[ss] = struct{}{}
smap.mu.Unlock()
defer func() {
smap.mu.Lock()
delete(smap.streams, ss)
smap.mu.Unlock()
cancel()
}()
}
}
return metricsStreamInterceptor(srv, ss, info, handler)
}
}
func metricsUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()
start := time.Now()
resp, err = handler(ctx, req)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}
handlingDuration.WithLabelValues(service, method).Observe(time.Since(start).Seconds())
return resp, err
}
func metricsStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
service, method := splitMethodName(info.FullMethod)
receivedCounter.WithLabelValues(service, method).Inc()
err := handler(srv, ss)
if err != nil {
failedCounter.WithLabelValues(service, method, grpc.Code(err).String()).Inc()
}
return err
}
func splitMethodName(fullMethodName string) (string, string) {
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
if i := strings.Index(fullMethodName, "/"); i >= 0 {
return fullMethodName[:i], fullMethodName[i+1:]
}
return "unknown", "unknown"
}
type serverStreamWithCtx struct {
grpc.ServerStream
ctx context.Context
cancel *context.CancelFunc
}
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
smap := &streamsMap{
streams: make(map[grpc.ServerStream]struct{}),
}
go func() {
election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
noLeaderCnt := 0
for {
select {
case <-s.StopNotify():
return
case <-time.After(election):
if s.Leader() == types.ID(raft.None) {
noLeaderCnt++
} else {
noLeaderCnt = 0
}
// We are more conservative on canceling existing streams. Reconnecting streams
// cost much more than just rejecting new requests. So we wait until the member
// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
if noLeaderCnt >= maxNoLeaderCnt {
smap.mu.Lock()
for ss := range smap.streams {
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
(*ssWithCtx.cancel)()
<-ss.Context().Done()
}
}
smap.streams = make(map[grpc.ServerStream]struct{})
smap.mu.Unlock()
}
}
}
}()
return smap
}

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -125,38 +125,38 @@ func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Co
func checkRangeRequest(r *pb.RangeRequest) error { func checkRangeRequest(r *pb.RangeRequest) error {
if len(r.Key) == 0 { if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey return rpctypes.ErrGRPCEmptyKey
} }
return nil return nil
} }
func checkPutRequest(r *pb.PutRequest) error { func checkPutRequest(r *pb.PutRequest) error {
if len(r.Key) == 0 { if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey return rpctypes.ErrGRPCEmptyKey
} }
return nil return nil
} }
func checkDeleteRequest(r *pb.DeleteRangeRequest) error { func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
if len(r.Key) == 0 { if len(r.Key) == 0 {
return rpctypes.ErrEmptyKey return rpctypes.ErrGRPCEmptyKey
} }
return nil return nil
} }
func checkTxnRequest(r *pb.TxnRequest) error { func checkTxnRequest(r *pb.TxnRequest) error {
if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn {
return rpctypes.ErrTooManyOps return rpctypes.ErrGRPCTooManyOps
} }
for _, c := range r.Compare { for _, c := range r.Compare {
if len(c.Key) == 0 { if len(c.Key) == 0 {
return rpctypes.ErrEmptyKey return rpctypes.ErrGRPCEmptyKey
} }
} }
for _, u := range r.Success { for _, u := range r.Success {
if err := checkRequestUnion(u); err != nil { if err := checkRequestOp(u); err != nil {
return err return err
} }
} }
@ -165,23 +165,19 @@ func checkTxnRequest(r *pb.TxnRequest) error {
} }
for _, u := range r.Failure { for _, u := range r.Failure {
if err := checkRequestUnion(u); err != nil { if err := checkRequestOp(u); err != nil {
return err return err
} }
} }
if err := checkRequestDupKeys(r.Failure); err != nil { return checkRequestDupKeys(r.Failure)
return err
} }
return nil // checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice
} func checkRequestDupKeys(reqs []*pb.RequestOp) error {
// checkRequestDupKeys gives rpctypes.ErrDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
// check put overlap // check put overlap
keys := make(map[string]struct{}) keys := make(map[string]struct{})
for _, requ := range reqs { for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestPut) tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
if !ok { if !ok {
continue continue
} }
@ -189,11 +185,10 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
if preq == nil { if preq == nil {
continue continue
} }
key := string(preq.Key) if _, ok := keys[string(preq.Key)]; ok {
if _, ok := keys[key]; ok { return rpctypes.ErrGRPCDuplicateKey
return rpctypes.ErrDuplicateKey
} }
keys[key] = struct{}{} keys[string(preq.Key)] = struct{}{}
} }
// no need to check deletes if no puts; delete overlaps are permitted // no need to check deletes if no puts; delete overlaps are permitted
@ -210,7 +205,7 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
// check put overlap with deletes // check put overlap with deletes
for _, requ := range reqs { for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestDeleteRange) tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange)
if !ok { if !ok {
continue continue
} }
@ -218,17 +213,16 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
if dreq == nil { if dreq == nil {
continue continue
} }
key := string(dreq.Key)
if dreq.RangeEnd == nil { if dreq.RangeEnd == nil {
if _, found := keys[key]; found { if _, found := keys[string(dreq.Key)]; found {
return rpctypes.ErrDuplicateKey return rpctypes.ErrGRPCDuplicateKey
} }
} else { } else {
lo := sort.SearchStrings(sortedKeys, key) lo := sort.SearchStrings(sortedKeys, string(dreq.Key))
hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
if lo != hi { if lo != hi {
// element between lo and hi => overlap // element between lo and hi => overlap
return rpctypes.ErrDuplicateKey return rpctypes.ErrGRPCDuplicateKey
} }
} }
} }
@ -236,23 +230,23 @@ func checkRequestDupKeys(reqs []*pb.RequestUnion) error {
return nil return nil
} }
func checkRequestUnion(u *pb.RequestUnion) error { func checkRequestOp(u *pb.RequestOp) error {
// TODO: ensure only one of the field is set. // TODO: ensure only one of the field is set.
switch uv := u.Request.(type) { switch uv := u.Request.(type) {
case *pb.RequestUnion_RequestRange: case *pb.RequestOp_RequestRange:
if uv.RequestRange != nil { if uv.RequestRange != nil {
return checkRangeRequest(uv.RequestRange) return checkRangeRequest(uv.RequestRange)
} }
case *pb.RequestUnion_RequestPut: case *pb.RequestOp_RequestPut:
if uv.RequestPut != nil { if uv.RequestPut != nil {
return checkPutRequest(uv.RequestPut) return checkPutRequest(uv.RequestPut)
} }
case *pb.RequestUnion_RequestDeleteRange: case *pb.RequestOp_RequestDeleteRange:
if uv.RequestDeleteRange != nil { if uv.RequestDeleteRange != nil {
return checkDeleteRequest(uv.RequestDeleteRange) return checkDeleteRequest(uv.RequestDeleteRange)
} }
default: default:
// empty union // empty op
return nil return nil
} }
return nil return nil

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -25,27 +25,33 @@ import (
) )
type LeaseServer struct { type LeaseServer struct {
hdr header
le etcdserver.Lessor le etcdserver.Lessor
} }
func NewLeaseServer(le etcdserver.Lessor) pb.LeaseServer { func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
return &LeaseServer{le: le} return &LeaseServer{le: s, hdr: newHeader(s)}
} }
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
resp, err := ls.le.LeaseGrant(ctx, cr) resp, err := ls.le.LeaseGrant(ctx, cr)
if err == lease.ErrLeaseExists { if err == lease.ErrLeaseExists {
return nil, rpctypes.ErrLeaseExist return nil, rpctypes.ErrGRPCLeaseExist
} }
if err != nil {
return nil, err
}
ls.hdr.fill(resp.Header)
return resp, err return resp, err
} }
func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
r, err := ls.le.LeaseRevoke(ctx, rr) resp, err := ls.le.LeaseRevoke(ctx, rr)
if err != nil { if err != nil {
return nil, rpctypes.ErrLeaseNotFound return nil, rpctypes.ErrGRPCLeaseNotFound
} }
return r, nil ls.hdr.fill(resp.Header)
return resp, nil
} }
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
@ -58,16 +64,26 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
return err return err
} }
// Create the header before sending out the renew request.
// This ensures the header's revision is smaller than or equal to the revision
// at which the keepalive happened at the local server (when the local server
// is the leader) or at the remote leader.
// Without this, a lease might be revoked at rev 3 while the client still sees
// the keepalive succeed at rev 4.
resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
ls.hdr.fill(resp.Header)
ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID)) ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID))
if err == lease.ErrLeaseNotFound { if err == lease.ErrLeaseNotFound {
return rpctypes.ErrLeaseNotFound err = nil
ttl = 0
} }
if err != nil && err != lease.ErrLeaseNotFound { if err != nil {
return err return err
} }
resp := &pb.LeaseKeepAliveResponse{ID: req.ID, TTL: ttl} resp.TTL = ttl
err = stream.Send(resp) err = stream.Send(resp)
if err != nil { if err != nil {
return err return err

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,13 +15,22 @@
package v3rpc package v3rpc
import ( import (
"crypto/sha256"
"io"
"github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage/backend" "github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/version" "github.com/coreos/etcd/version"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
type KVGetter interface {
KV() mvcc.ConsistentWatchableKV
}
type BackendGetter interface { type BackendGetter interface {
Backend() backend.Backend Backend() backend.Backend
} }
@ -30,33 +39,86 @@ type Alarmer interface {
Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
} }
type RaftStatusGetter interface {
Index() uint64
Term() uint64
Leader() types.ID
}
type maintenanceServer struct { type maintenanceServer struct {
rg RaftStatusGetter
kg KVGetter
bg BackendGetter bg BackendGetter
a Alarmer a Alarmer
hdr header hdr header
} }
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
return &maintenanceServer{bg: s, a: s, hdr: newHeader(s)} return &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
} }
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
plog.Noticef("starting to defragment the storage backend...") plog.Noticef("starting to defragment the storage backend...")
err := ms.bg.Backend().Defrag() err := ms.bg.Backend().Defrag()
if err != nil { if err != nil {
plog.Errorf("failed to deframent the storage backend (%v)", err) plog.Errorf("failed to defragment the storage backend (%v)", err)
return nil, err return nil, err
} }
plog.Noticef("finished defragmenting the storage backend") plog.Noticef("finished defragmenting the storage backend")
return &pb.DefragmentResponse{}, nil return &pb.DefragmentResponse{}, nil
} }
func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
snap := ms.bg.Backend().Snapshot()
pr, pw := io.Pipe()
defer pr.Close()
go func() {
snap.WriteTo(pw)
if err := snap.Close(); err != nil {
plog.Errorf("error closing snapshot (%v)", err)
}
pw.Close()
}()
// send file data
h := sha256.New()
br := int64(0)
buf := make([]byte, 32*1024)
sz := snap.Size()
for br < sz {
n, err := io.ReadFull(pr, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return togRPCError(err)
}
br += int64(n)
resp := &pb.SnapshotResponse{
RemainingBytes: uint64(sz - br),
Blob: buf[:n],
}
if err = srv.Send(resp); err != nil {
return togRPCError(err)
}
h.Write(buf[:n])
}
// send sha
sha := h.Sum(nil)
hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
if err := srv.Send(hresp); err != nil {
return togRPCError(err)
}
return nil
}
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
h, err := ms.bg.Backend().Hash() h, rev, err := ms.kg.KV().Hash()
if err != nil { if err != nil {
return nil, togRPCError(err) return nil, togRPCError(err)
} }
resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, Hash: h} resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
ms.hdr.fill(resp.Header) ms.hdr.fill(resp.Header)
return resp, nil return resp, nil
} }
@ -66,7 +128,14 @@ func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*p
} }
func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
resp := &pb.StatusResponse{Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, Version: version.Version} resp := &pb.StatusResponse{
Header: &pb.ResponseHeader{Revision: ms.hdr.rev()},
Version: version.Version,
DbSize: ms.bg.Backend().Size(),
Leader: uint64(ms.rg.Leader()),
RaftIndex: ms.rg.Index(),
RaftTerm: ms.rg.Term(),
}
ms.hdr.fill(resp.Header) ms.hdr.fill(resp.Header)
return resp, nil return resp, nil
} }

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -45,7 +45,7 @@ func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
urls, err := types.NewURLs(r.PeerURLs) urls, err := types.NewURLs(r.PeerURLs)
if err != nil { if err != nil {
return nil, rpctypes.ErrMemberBadURLs return nil, rpctypes.ErrGRPCMemberBadURLs
} }
now := time.Now() now := time.Now()
@ -53,16 +53,16 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest)
err = cs.server.AddMember(ctx, *m) err = cs.server.AddMember(ctx, *m)
switch { switch {
case err == membership.ErrIDExists: case err == membership.ErrIDExists:
return nil, rpctypes.ErrMemberExist return nil, rpctypes.ErrGRPCMemberExist
case err == membership.ErrPeerURLexists: case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist return nil, rpctypes.ErrGRPCPeerURLExist
case err != nil: case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error()) return nil, grpc.Errorf(codes.Internal, err.Error())
} }
return &pb.MemberAddResponse{ return &pb.MemberAddResponse{
Header: cs.header(), Header: cs.header(),
Member: &pb.Member{ID: uint64(m.ID), IsLeader: m.ID == cs.server.Leader(), PeerURLs: m.PeerURLs}, Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
}, nil }, nil
} }
@ -72,7 +72,7 @@ func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveReq
case err == membership.ErrIDRemoved: case err == membership.ErrIDRemoved:
fallthrough fallthrough
case err == membership.ErrIDNotFound: case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrMemberNotFound return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil: case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error()) return nil, grpc.Errorf(codes.Internal, err.Error())
} }
@ -88,9 +88,9 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq
err := cs.server.UpdateMember(ctx, m) err := cs.server.UpdateMember(ctx, m)
switch { switch {
case err == membership.ErrPeerURLexists: case err == membership.ErrPeerURLexists:
return nil, rpctypes.ErrPeerURLExist return nil, rpctypes.ErrGRPCPeerURLExist
case err == membership.ErrIDNotFound: case err == membership.ErrIDNotFound:
return nil, rpctypes.ErrMemberNotFound return nil, rpctypes.ErrGRPCMemberNotFound
case err != nil: case err != nil:
return nil, grpc.Errorf(codes.Internal, err.Error()) return nil, grpc.Errorf(codes.Internal, err.Error())
} }
@ -106,7 +106,6 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest
protoMembs[i] = &pb.Member{ protoMembs[i] = &pb.Member{
Name: membs[i].Name, Name: membs[i].Name,
ID: uint64(membs[i].ID), ID: uint64(membs[i].ID),
IsLeader: membs[i].ID == cs.server.Leader(),
PeerURLs: membs[i].PeerURLs, PeerURLs: membs[i].PeerURLs,
ClientURLs: membs[i].ClientURLs, ClientURLs: membs[i].ClientURLs,
} }

@ -0,0 +1,67 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import "github.com/prometheus/client_golang/prometheus"
var (
receivedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_total",
Help: "Counter of received requests.",
}, []string{"grpc_service", "grpc_method"})
failedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "requests_failed_total",
Help: "Counter of failed requests.",
}, []string{"grpc_service", "grpc_method", "grpc_code"})
handlingDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "grpc",
Name: "unary_requests_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled unary (non-stream) requests.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
}, []string{"grpc_service", "grpc_method"})
sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "network",
Name: "client_grpc_sent_bytes_total",
Help: "The total number of bytes sent to grpc clients.",
})
receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "network",
Name: "client_grpc_received_bytes_total",
Help: "The total number of bytes received from grpc clients.",
})
)
func init() {
prometheus.MustRegister(receivedCounter)
prometheus.MustRegister(failedCounter)
prometheus.MustRegister(handlingDuration)
prometheus.MustRegister(sentBytes)
prometheus.MustRegister(receivedBytes)
}
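For reference (not part of the vendored diff): a minimal, self-contained sketch of what the splitMethodName helper in interceptor.go above yields for a v3 KV call, and therefore which label values these counters carry. The "/etcdserverpb.KV/Range" string is the standard gRPC full-method form, and the exported metric name follows from the Namespace/Subsystem/Name fields above.

package main

import (
	"fmt"
	"strings"
)

// Same logic as splitMethodName in interceptor.go, reproduced here so the
// snippet runs standalone.
func splitMethodName(fullMethodName string) (string, string) {
	fullMethodName = strings.TrimPrefix(fullMethodName, "/")
	if i := strings.Index(fullMethodName, "/"); i >= 0 {
		return fullMethodName[:i], fullMethodName[i+1:]
	}
	return "unknown", "unknown"
}

func main() {
	service, method := splitMethodName("/etcdserverpb.KV/Range")
	fmt.Println(service, method) // etcdserverpb.KV Range
	// receivedCounter above is therefore exported as
	// etcd_grpc_requests_total{grpc_service="etcdserverpb.KV",grpc_method="Range"}.
}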

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -45,7 +45,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
Alarm: pb.AlarmType_NOSPACE, Alarm: pb.AlarmType_NOSPACE,
} }
qa.a.Alarm(ctx, req) qa.a.Alarm(ctx, req)
return rpctypes.ErrNoSpace return rpctypes.ErrGRPCNoSpace
} }
func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
package rpctypes

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -20,24 +20,131 @@ import (
) )
var ( var (
ErrEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") // server-side error
ErrTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision has been compacted") ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: storage: required revision is a future revision") ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: storage: database space exceeded") ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist") ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists") ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid") ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found") ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists") ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found") ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists") ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
ErrAuthFailed = Error(ErrGRPCAuthFailed)
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
) )
// EtcdError defines gRPC server errors.
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
type EtcdError struct {
code codes.Code
desc string
}
// Code returns grpc/codes.Code.
// TODO: define clientv3/codes.Code.
func (e EtcdError) Code() codes.Code {
return e.code
}
func (e EtcdError) Error() string {
return e.desc
}
func Error(err error) error {
if err == nil {
return nil
}
verr, ok := errStringToError[grpc.ErrorDesc(err)]
if !ok { // not gRPC error
return err
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
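As an aside (not part of the diff): the Error helper above is what lets calling code compare against the typed client-side values instead of raw gRPC errors. A minimal sketch, assuming only the vendored import path used throughout this commit:

package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

// describe maps a gRPC error returned by the v3 API onto the typed
// client-side values via rpctypes.Error.
func describe(err error) string {
	switch rpctypes.Error(err) {
	case rpctypes.ErrEmptyKey:
		return "no key was provided in the request"
	case rpctypes.ErrNoLeader:
		return "the contacted member currently has no leader"
	default:
		return fmt.Sprintf("unrecognized error: %v", err)
	}
}

func main() {
	// ErrGRPCEmptyKey stands in for an error received over the wire.
	fmt.Println(describe(rpctypes.ErrGRPCEmptyKey))
}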

@ -0,0 +1,20 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
)
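A minimal sketch (not part of the diff) of how a caller could attach these keys so that newUnaryInterceptor above rejects the call with ErrGRPCNoLeader on a leaderless member; it assumes the 2016-era grpc-go metadata API (metadata.NewContext / metadata.FromContext) that this vendored tree itself uses:

package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach "hasleader: true" so the server fails the RPC instead of
	// serving it while the member has no raft leader.
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewContext(context.Background(), md)

	// The server side reads it back exactly as the interceptors do.
	got, _ := metadata.FromContext(ctx)
	fmt.Println(got[rpctypes.MetadataRequireLeaderKey]) // [true]
}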

@ -1,4 +1,4 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,30 +19,45 @@ import (
"github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease"
"github.com/coreos/etcd/storage" "github.com/coreos/etcd/mvcc"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
func togRPCError(err error) error { func togRPCError(err error) error {
switch err { switch err {
case storage.ErrCompacted: case mvcc.ErrCompacted:
return rpctypes.ErrCompacted return rpctypes.ErrGRPCCompacted
case storage.ErrFutureRev: case mvcc.ErrFutureRev:
return rpctypes.ErrFutureRev return rpctypes.ErrGRPCFutureRev
case lease.ErrLeaseNotFound: case lease.ErrLeaseNotFound:
return rpctypes.ErrLeaseNotFound return rpctypes.ErrGRPCLeaseNotFound
// TODO: handle error from raft and timeout // TODO: handle error from raft and timeout
case etcdserver.ErrRequestTooLarge: case etcdserver.ErrRequestTooLarge:
return rpctypes.ErrRequestTooLarge return rpctypes.ErrGRPCRequestTooLarge
case etcdserver.ErrNoSpace: case etcdserver.ErrNoSpace:
return rpctypes.ErrNoSpace return rpctypes.ErrGRPCNoSpace
case auth.ErrRootUserNotExist:
return rpctypes.ErrGRPCRootUserNotExist
case auth.ErrRootRoleNotExist:
return rpctypes.ErrGRPCRootRoleNotExist
case auth.ErrUserAlreadyExist: case auth.ErrUserAlreadyExist:
return rpctypes.ErrUserAlreadyExist return rpctypes.ErrGRPCUserAlreadyExist
case auth.ErrUserNotFound: case auth.ErrUserNotFound:
return rpctypes.ErrUserNotFound return rpctypes.ErrGRPCUserNotFound
case auth.ErrRoleAlreadyExist: case auth.ErrRoleAlreadyExist:
return rpctypes.ErrRoleAlreadyExist return rpctypes.ErrGRPCRoleAlreadyExist
case auth.ErrRoleNotFound:
return rpctypes.ErrGRPCRoleNotFound
case auth.ErrAuthFailed:
return rpctypes.ErrGRPCAuthFailed
case auth.ErrPermissionDenied:
return rpctypes.ErrGRPCPermissionDenied
case auth.ErrRoleNotGranted:
return rpctypes.ErrGRPCRoleNotGranted
case auth.ErrPermissionNotGranted:
return rpctypes.ErrGRPCPermissionNotGranted
default: default:
return grpc.Errorf(codes.Internal, err.Error()) return grpc.Errorf(codes.Internal, err.Error())
} }

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,17 +19,20 @@ import (
"sync" "sync"
"time" "time"
"golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/storage" "github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/storage/storagepb" "github.com/coreos/etcd/mvcc/mvccpb"
) )
type watchServer struct { type watchServer struct {
clusterID int64 clusterID int64
memberID int64 memberID int64
raftTimer etcdserver.RaftTimer raftTimer etcdserver.RaftTimer
watchable storage.Watchable watchable mvcc.Watchable
} }
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
@ -71,7 +74,7 @@ const (
) )
// serverWatchStream is an etcd server side stream. It receives requests // serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from storage.WatchStream, // from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that forwarded to gRPC stream. // and creates responses that forwarded to gRPC stream.
// It also forwards control message like watch created and canceled. // It also forwards control message like watch created and canceled.
type serverWatchStream struct { type serverWatchStream struct {
@ -80,20 +83,23 @@ type serverWatchStream struct {
raftTimer etcdserver.RaftTimer raftTimer etcdserver.RaftTimer
gRPCStream pb.Watch_WatchServer gRPCStream pb.Watch_WatchServer
watchStream storage.WatchStream watchStream mvcc.WatchStream
ctrlStream chan *pb.WatchResponse ctrlStream chan *pb.WatchResponse
// mu protects progress, prevKV
mu sync.Mutex
// progress tracks the watchID that stream might need to send // progress tracks the watchID that stream might need to send
// progress to. // progress to.
progress map[storage.WatchID]bool progress map[mvcc.WatchID]bool
// mu protects progress
mu sync.Mutex
// closec indicates the stream is closed. // closec indicates the stream is closed.
closec chan struct{} closec chan struct{}
// wg waits for the send loop to complete
wg sync.WaitGroup
} }
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error { func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
sws := serverWatchStream{ sws := serverWatchStream{
clusterID: ws.clusterID, clusterID: ws.clusterID,
memberID: ws.memberID, memberID: ws.memberID,
@ -102,16 +108,38 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error {
watchStream: ws.watchable.NewWatchStream(), watchStream: ws.watchable.NewWatchStream(),
// chan for sending control response like watcher created and canceled. // chan for sending control response like watcher created and canceled.
ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
progress: make(map[storage.WatchID]bool), progress: make(map[mvcc.WatchID]bool),
closec: make(chan struct{}), closec: make(chan struct{}),
} }
defer sws.close()
go sws.sendLoop() sws.wg.Add(1)
return sws.recvLoop() go func() {
sws.sendLoop()
sws.wg.Done()
}()
errc := make(chan error, 1)
// Ideally recvLoop would also use sws.wg to signal its completion
// but when stream.Context().Done() is closed, the stream's recv
// may continue to block since it uses a different context, leading to
// deadlock when calling sws.close().
go func() { errc <- sws.recvLoop() }()
select {
case err = <-errc:
case <-stream.Context().Done():
err = stream.Context().Err()
// the only server-side cancellation is noleader for now.
if err == context.Canceled {
err = rpctypes.ErrGRPCNoLeader
}
}
sws.close()
return err
} }
func (sws *serverWatchStream) recvLoop() error { func (sws *serverWatchStream) recvLoop() error {
defer close(sws.ctrlStream)
for { for {
req, err := sws.gRPCStream.Recv() req, err := sws.gRPCStream.Recv()
if err == io.EOF { if err == io.EOF {
@ -143,18 +171,25 @@ func (sws *serverWatchStream) recvLoop() error {
} }
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev) id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
if id != -1 && creq.ProgressNotify { if id != -1 && creq.ProgressNotify {
sws.mu.Lock()
sws.progress[id] = true sws.progress[id] = true
sws.mu.Unlock()
} }
sws.ctrlStream <- &pb.WatchResponse{ wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev), Header: sws.newResponseHeader(wsrev),
WatchId: int64(id), WatchId: int64(id),
Created: true, Created: true,
Canceled: id == -1, Canceled: id == -1,
} }
select {
case sws.ctrlStream <- wr:
case <-sws.closec:
return nil
}
case *pb.WatchRequest_CancelRequest: case *pb.WatchRequest_CancelRequest:
if uv.CancelRequest != nil { if uv.CancelRequest != nil {
id := uv.CancelRequest.WatchId id := uv.CancelRequest.WatchId
err := sws.watchStream.Cancel(storage.WatchID(id)) err := sws.watchStream.Cancel(mvcc.WatchID(id))
if err == nil { if err == nil {
sws.ctrlStream <- &pb.WatchResponse{ sws.ctrlStream <- &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()), Header: sws.newResponseHeader(sws.watchStream.Rev()),
@ -162,26 +197,40 @@ func (sws *serverWatchStream) recvLoop() error {
Canceled: true, Canceled: true,
} }
sws.mu.Lock() sws.mu.Lock()
delete(sws.progress, storage.WatchID(id)) delete(sws.progress, mvcc.WatchID(id))
sws.mu.Unlock() sws.mu.Unlock()
} }
} }
// TODO: do we need to return error back to client?
default: default:
panic("not implemented") // we probably should not shut down the entire stream when
// we receive an invalid command,
// so just do nothing instead.
continue
} }
} }
} }
func (sws *serverWatchStream) sendLoop() { func (sws *serverWatchStream) sendLoop() {
// watch ids that are currently active // watch ids that are currently active
ids := make(map[storage.WatchID]struct{}) ids := make(map[mvcc.WatchID]struct{})
// watch responses pending on a watch id creation message // watch responses pending on a watch id creation message
pending := make(map[storage.WatchID][]*pb.WatchResponse) pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
interval := GetProgressReportInterval() interval := GetProgressReportInterval()
progressTicker := time.NewTicker(interval) progressTicker := time.NewTicker(interval)
defer progressTicker.Stop()
defer func() {
progressTicker.Stop()
// drain the chan to clean up pending events
for ws := range sws.watchStream.Chan() {
mvcc.ReportEventReceived(len(ws.Events))
}
for _, wrs := range pending {
for _, ws := range wrs {
mvcc.ReportEventReceived(len(ws.Events))
}
}
}()
for { for {
select { select {
@ -190,11 +239,11 @@ func (sws *serverWatchStream) sendLoop() {
return return
} }
// TODO: evs is []storagepb.Event type // TODO: evs is []mvccpb.Event type
// either return []*storagepb.Event from storage package // either return []*mvccpb.Event from the mvcc package
// or define protocol buffer with []storagepb.Event. // or define protocol buffer with []mvccpb.Event.
evs := wresp.Events evs := wresp.Events
events := make([]*storagepb.Event, len(evs)) events := make([]*mvccpb.Event, len(evs))
for i := range evs { for i := range evs {
events[i] = &evs[i] events[i] = &evs[i]
} }
@ -213,13 +262,14 @@ func (sws *serverWatchStream) sendLoop() {
continue continue
} }
storage.ReportEventReceived() mvcc.ReportEventReceived(len(evs))
if err := sws.gRPCStream.Send(wr); err != nil { if err := sws.gRPCStream.Send(wr); err != nil {
return return
} }
sws.mu.Lock() sws.mu.Lock()
if _, ok := sws.progress[wresp.WatchID]; ok { if len(evs) > 0 && sws.progress[wresp.WatchID] {
// elide next progress update if sent a key update
sws.progress[wresp.WatchID] = false sws.progress[wresp.WatchID] = false
} }
sws.mu.Unlock() sws.mu.Unlock()
@ -234,7 +284,7 @@ func (sws *serverWatchStream) sendLoop() {
} }
// track id creation // track id creation
wid := storage.WatchID(c.WatchId) wid := mvcc.WatchID(c.WatchId)
if c.Canceled { if c.Canceled {
delete(ids, wid) delete(ids, wid)
continue continue
@ -243,7 +293,7 @@ func (sws *serverWatchStream) sendLoop() {
// flush buffered events // flush buffered events
ids[wid] = struct{}{} ids[wid] = struct{}{}
for _, v := range pending[wid] { for _, v := range pending[wid] {
storage.ReportEventReceived() mvcc.ReportEventReceived(len(v.Events))
if err := sws.gRPCStream.Send(v); err != nil { if err := sws.gRPCStream.Send(v); err != nil {
return return
} }
@ -251,22 +301,16 @@ func (sws *serverWatchStream) sendLoop() {
delete(pending, wid) delete(pending, wid)
} }
case <-progressTicker.C: case <-progressTicker.C:
sws.mu.Lock()
for id, ok := range sws.progress { for id, ok := range sws.progress {
if ok { if ok {
sws.watchStream.RequestProgress(id) sws.watchStream.RequestProgress(id)
} }
sws.progress[id] = true sws.progress[id] = true
} }
sws.mu.Unlock()
case <-sws.closec: case <-sws.closec:
// drain the chan to clean up pending events return
for range sws.watchStream.Chan() {
storage.ReportEventReceived()
}
for _, wrs := range pending {
for range wrs {
storage.ReportEventReceived()
}
}
} }
} }
} }
@ -274,7 +318,7 @@ func (sws *serverWatchStream) sendLoop() {
func (sws *serverWatchStream) close() { func (sws *serverWatchStream) close() {
sws.watchStream.Close() sws.watchStream.Close()
close(sws.closec) close(sws.closec)
close(sws.ctrlStream) sws.wg.Wait()
} }
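For context (not part of the diff): the progress map filled in recvLoop and drained by sendLoop's progressTicker is driven by the ProgressNotify flag on the create request. A minimal sketch of the client-side message that turns it on; the RequestUnion field and the WatchRequest_CreateRequest wrapper are assumed by analogy with the WatchRequest_CancelRequest case handled above:

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// progressNotifyWatch builds the create request that makes recvLoop register
// the new watch id in sws.progress, so sendLoop emits periodic header-only
// responses while the watch is idle.
func progressNotifyWatch(key []byte) *pb.WatchRequest {
	return &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key:            key,
				ProgressNotify: true,
			},
		},
	}
}

func main() {
	fmt.Println(progressNotifyWatch([]byte("foo")))
}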
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {

@ -1,4 +1,4 @@
// Copyright 2016 CoreOS, Inc. // Copyright 2016 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,13 +18,15 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"sort" "sort"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/pkg/types"
dstorage "github.com/coreos/etcd/storage"
"github.com/coreos/etcd/storage/storagepb"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"golang.org/x/net/context"
) )
const ( const (
@ -32,6 +34,8 @@ const (
// To apply with independent Range, Put, Delete, you can pass noTxn // To apply with independent Range, Put, Delete, you can pass noTxn
// to apply functions instead of a valid txn ID. // to apply functions instead of a valid txn ID.
noTxn = -1 noTxn = -1
warnApplyDuration = 10 * time.Millisecond
) )
type applyResult struct { type applyResult struct {
@ -45,54 +49,103 @@ type applyResult struct {
// applierV3 is the interface for processing V3 raft messages // applierV3 is the interface for processing V3 raft messages
type applierV3 interface { type applierV3 interface {
Apply(r *pb.InternalRaftRequest) *applyResult
Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error)
Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error)
DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
AuthEnable() (*pb.AuthEnableResponse, error) AuthEnable() (*pb.AuthEnableResponse, error)
AuthDisable() (*pb.AuthDisableResponse, error)
UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
} }
type applierV3backend struct { type applierV3backend struct {
s *EtcdServer s *EtcdServer
} }
func (s *EtcdServer) applyV3Request(r *pb.InternalRaftRequest) *applyResult { func (s *EtcdServer) newApplierV3() applierV3 {
return newAuthApplierV3(
s.AuthStore(),
newQuotaApplierV3(s, &applierV3backend{s}),
)
}
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
ar := &applyResult{} ar := &applyResult{}
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
switch { switch {
case r.Range != nil: case r.Range != nil:
ar.resp, ar.err = s.applyV3.Range(noTxn, r.Range) ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range)
case r.Put != nil: case r.Put != nil:
ar.resp, ar.err = s.applyV3.Put(noTxn, r.Put) ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put)
case r.DeleteRange != nil: case r.DeleteRange != nil:
ar.resp, ar.err = s.applyV3.DeleteRange(noTxn, r.DeleteRange) ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange)
case r.Txn != nil: case r.Txn != nil:
ar.resp, ar.err = s.applyV3.Txn(r.Txn) ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
case r.Compaction != nil: case r.Compaction != nil:
ar.resp, ar.physc, ar.err = s.applyV3.Compaction(r.Compaction) ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
case r.LeaseGrant != nil: case r.LeaseGrant != nil:
ar.resp, ar.err = s.applyV3.LeaseGrant(r.LeaseGrant) ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
case r.LeaseRevoke != nil: case r.LeaseRevoke != nil:
ar.resp, ar.err = s.applyV3.LeaseRevoke(r.LeaseRevoke) ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
case r.Alarm != nil: case r.Alarm != nil:
ar.resp, ar.err = s.applyV3.Alarm(r.Alarm) ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
case r.Authenticate != nil:
ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
case r.AuthEnable != nil: case r.AuthEnable != nil:
ar.resp, ar.err = s.applyV3.AuthEnable() ar.resp, ar.err = a.s.applyV3.AuthEnable()
case r.AuthDisable != nil:
ar.resp, ar.err = a.s.applyV3.AuthDisable()
case r.AuthUserAdd != nil: case r.AuthUserAdd != nil:
ar.resp, ar.err = s.applyV3.UserAdd(r.AuthUserAdd) ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
case r.AuthUserDelete != nil: case r.AuthUserDelete != nil:
ar.resp, ar.err = s.applyV3.UserDelete(r.AuthUserDelete) ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
case r.AuthUserChangePassword != nil: case r.AuthUserChangePassword != nil:
ar.resp, ar.err = s.applyV3.UserChangePassword(r.AuthUserChangePassword) ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
case r.AuthUserGrantRole != nil:
ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
case r.AuthUserGet != nil:
ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
case r.AuthUserRevokeRole != nil:
ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
case r.AuthRoleAdd != nil: case r.AuthRoleAdd != nil:
ar.resp, ar.err = s.applyV3.RoleAdd(r.AuthRoleAdd) ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
case r.AuthRoleGrantPermission != nil:
ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
case r.AuthRoleGet != nil:
ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
case r.AuthRoleRevokePermission != nil:
ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
case r.AuthRoleDelete != nil:
ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
case r.AuthUserList != nil:
ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
case r.AuthRoleList != nil:
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
default: default:
panic("not implemented") panic("not implemented")
} }
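The switch above dispatches on whichever field of the internal request is non-nil; a compact sketch of that pattern with made-up request types (not the etcd structs):

package main

import "fmt"

// internalReq mimics a union: exactly one pointer field is expected to be set.
type internalReq struct {
	Put         *string
	DeleteRange *string
}

func apply(r internalReq) string {
	switch {
	case r.Put != nil:
		return "put " + *r.Put
	case r.DeleteRange != nil:
		return "delete " + *r.DeleteRange
	default:
		panic("not implemented")
	}
}

func main() {
	key := "foo"
	fmt.Println(apply(internalReq{Put: &key})) // put foo
}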
@ -157,8 +210,7 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
resp.Header = &pb.ResponseHeader{} resp.Header = &pb.ResponseHeader{}
var ( var (
kvs []storagepb.KeyValue rr *mvcc.RangeResult
rev int64
err error err error
) )
@ -176,13 +228,19 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
limit = limit + 1 limit = limit + 1
} }
ro := mvcc.RangeOptions{
Limit: limit,
Rev: r.Revision,
Count: r.CountOnly,
}
if txnID != noTxn { if txnID != noTxn {
kvs, rev, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, limit, r.Revision) rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} else { } else {
kvs, rev, err = a.s.KV().Range(r.Key, r.RangeEnd, limit, r.Revision) rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -192,15 +250,15 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
var sorter sort.Interface var sorter sort.Interface
switch { switch {
case r.SortTarget == pb.RangeRequest_KEY: case r.SortTarget == pb.RangeRequest_KEY:
sorter = &kvSortByKey{&kvSort{kvs}} sorter = &kvSortByKey{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_VERSION: case r.SortTarget == pb.RangeRequest_VERSION:
sorter = &kvSortByVersion{&kvSort{kvs}} sorter = &kvSortByVersion{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_CREATE: case r.SortTarget == pb.RangeRequest_CREATE:
sorter = &kvSortByCreate{&kvSort{kvs}} sorter = &kvSortByCreate{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_MOD: case r.SortTarget == pb.RangeRequest_MOD:
sorter = &kvSortByMod{&kvSort{kvs}} sorter = &kvSortByMod{&kvSort{rr.KVs}}
case r.SortTarget == pb.RangeRequest_VALUE: case r.SortTarget == pb.RangeRequest_VALUE:
sorter = &kvSortByValue{&kvSort{kvs}} sorter = &kvSortByValue{&kvSort{rr.KVs}}
} }
switch { switch {
case r.SortOrder == pb.RangeRequest_ASCEND: case r.SortOrder == pb.RangeRequest_ASCEND:
@ -210,29 +268,31 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp
} }
} }
if r.Limit > 0 && len(kvs) > int(r.Limit) { if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
kvs = kvs[:r.Limit] rr.KVs = rr.KVs[:r.Limit]
resp.More = true resp.More = true
} }
resp.Header.Revision = rev resp.Header.Revision = rr.Rev
for i := range kvs { resp.Count = int64(rr.Count)
resp.Kvs = append(resp.Kvs, &kvs[i]) for i := range rr.KVs {
if r.KeysOnly {
rr.KVs[i].Value = nil
}
resp.Kvs = append(resp.Kvs, &rr.KVs[i])
} }
return resp, nil return resp, nil
} }
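The Range rewrite above asks the MVCC store for limit+1 keys so it can tell whether results were truncated, then trims the slice and sets More. A small sketch of that technique in isolation (the fetch function and types are placeholders, not the mvcc API):

package main

import "fmt"

// rangeWithMore requests one extra item; if it comes back, the caller knows
// there are more results beyond the limit.
func rangeWithMore(fetch func(limit int64) []string, limit int64) ([]string, bool) {
	kvs := fetch(limit + 1)
	more := false
	if int64(len(kvs)) > limit {
		kvs = kvs[:limit]
		more = true
	}
	return kvs, more
}

func main() {
	all := []string{"a", "b", "c", "d"}
	fetch := func(limit int64) []string {
		if limit > int64(len(all)) {
			limit = int64(len(all))
		}
		return all[:limit]
	}
	kvs, more := rangeWithMore(fetch, 2)
	fmt.Println(kvs, more) // [a b] true
}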
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
var revision int64
ok := true ok := true
for _, c := range rt.Compare { for _, c := range rt.Compare {
if revision, ok = a.applyCompare(c); !ok { if _, ok = a.applyCompare(c); !ok {
break break
} }
} }
var reqs []*pb.RequestUnion var reqs []*pb.RequestOp
if ok { if ok {
reqs = rt.Success reqs = rt.Success
} else { } else {
@ -246,6 +306,8 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
return nil, err return nil, err
} }
revision := a.s.KV().Rev()
// When executing the operations of txn, we need to hold the txn lock. // When executing the operations of txn, we need to hold the txn lock.
// So the reader will not see any intermediate results. // So the reader will not see any intermediate results.
txnID := a.s.KV().TxnBegin() txnID := a.s.KV().TxnBegin()
@ -256,12 +318,16 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
} }
}() }()
resps := make([]*pb.ResponseUnion, len(reqs)) resps := make([]*pb.ResponseOp, len(reqs))
changedKV := false
for i := range reqs { for i := range reqs {
if reqs[i].GetRequestRange() == nil {
changedKV = true
}
resps[i] = a.applyUnion(txnID, reqs[i]) resps[i] = a.applyUnion(txnID, reqs[i])
} }
if len(resps) != 0 { if changedKV {
revision += 1 revision += 1
} }
@ -277,16 +343,18 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
// It returns the revision at which the comparison happens. If the comparison // It returns the revision at which the comparison happens. If the comparison
// succeeds, then it returns true. Otherwise it returns false. // succeeds, then it returns true. Otherwise it returns false.
func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
ckvs, rev, err := a.s.KV().Range(c.Key, nil, 1, 0) rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{})
rev := rr.Rev
if err != nil { if err != nil {
if err == dstorage.ErrTxnIDMismatch { if err == mvcc.ErrTxnIDMismatch {
panic("unexpected txn ID mismatch error") panic("unexpected txn ID mismatch error")
} }
return rev, false return rev, false
} }
var ckv storagepb.KeyValue var ckv mvccpb.KeyValue
if len(ckvs) != 0 { if len(rr.KVs) != 0 {
ckv = ckvs[0] ckv = rr.KVs[0]
} else { } else {
// Use the zero value of ckv normally. However... // Use the zero value of ckv normally. However...
if c.Target == pb.Compare_VALUE { if c.Target == pb.Compare_VALUE {
@ -341,31 +409,31 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
return rev, true return rev, true
} }
func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestUnion) *pb.ResponseUnion { func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp {
switch tv := union.Request.(type) { switch tv := union.Request.(type) {
case *pb.RequestUnion_RequestRange: case *pb.RequestOp_RequestRange:
if tv.RequestRange != nil { if tv.RequestRange != nil {
resp, err := a.Range(txnID, tv.RequestRange) resp, err := a.Range(txnID, tv.RequestRange)
if err != nil { if err != nil {
panic("unexpected error during txn") panic("unexpected error during txn")
} }
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{ResponseRange: resp}} return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
} }
case *pb.RequestUnion_RequestPut: case *pb.RequestOp_RequestPut:
if tv.RequestPut != nil { if tv.RequestPut != nil {
resp, err := a.Put(txnID, tv.RequestPut) resp, err := a.Put(txnID, tv.RequestPut)
if err != nil { if err != nil {
panic("unexpected error during txn") panic("unexpected error during txn")
} }
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponsePut{ResponsePut: resp}} return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
} }
case *pb.RequestUnion_RequestDeleteRange: case *pb.RequestOp_RequestDeleteRange:
if tv.RequestDeleteRange != nil { if tv.RequestDeleteRange != nil {
resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange)
if err != nil { if err != nil {
panic("unexpected error during txn") panic("unexpected error during txn")
} }
return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: resp}} return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
} }
default: default:
// empty union // empty union
@ -383,7 +451,8 @@ func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.Com
return nil, ch, err return nil, ch, err
} }
// get the current revision. which key to get is not important. // get the current revision. which key to get is not important.
_, resp.Header.Revision, _ = a.s.KV().Range([]byte("compaction"), nil, 1, 0) rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
resp.Header.Revision = rr.Rev
return resp, ch, err return resp, ch, err
} }
@ -393,13 +462,15 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
if err == nil { if err == nil {
resp.ID = int64(l.ID) resp.ID = int64(l.ID)
resp.TTL = l.TTL resp.TTL = l.TTL
resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
} }
return resp, err return resp, err
} }
func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
return &pb.LeaseRevokeResponse{}, err return &pb.LeaseRevokeResponse{Header: &pb.ResponseHeader{Revision: a.s.KV().Rev()}}, err
} }
func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
@ -441,7 +512,7 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
switch m.Alarm { switch m.Alarm {
case pb.AlarmType_NOSPACE: case pb.AlarmType_NOSPACE:
plog.Infof("alarm disarmed %+v", ar) plog.Infof("alarm disarmed %+v", ar)
a.s.applyV3 = newQuotaApplierV3(a.s, &applierV3backend{a.s}) a.s.applyV3 = a.s.newApplierV3()
default: default:
plog.Errorf("unimplemented alarm deactivation (%+v)", m) plog.Errorf("unimplemented alarm deactivation (%+v)", m)
} }
@ -476,10 +547,23 @@ func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRe
} }
func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
a.s.AuthStore().AuthEnable() err := a.s.AuthStore().AuthEnable()
if err != nil {
return nil, err
}
return &pb.AuthEnableResponse{}, nil return &pb.AuthEnableResponse{}, nil
} }
func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
a.s.AuthStore().AuthDisable()
return &pb.AuthDisableResponse{}, nil
}
func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
ctx := context.WithValue(context.WithValue(context.TODO(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
return a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
}
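Authenticate above passes the consistent index and the simple token to the auth store through a context; a minimal illustration of carrying request-scoped values with context.WithValue (keys kept as plain strings here purely for brevity):

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx := context.WithValue(context.TODO(), "index", uint64(42))
	ctx = context.WithValue(ctx, "simpleToken", "s3cr3t")
	// A callee can later pull the values back out of the context.
	fmt.Println(ctx.Value("index"), ctx.Value("simpleToken")) // 42 s3cr3t
}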
func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
return a.s.AuthStore().UserAdd(r) return a.s.AuthStore().UserAdd(r)
} }
@ -492,10 +576,46 @@ func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordReques
return a.s.AuthStore().UserChangePassword(r) return a.s.AuthStore().UserChangePassword(r)
} }
func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
return a.s.AuthStore().UserGrantRole(r)
}
func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
return a.s.AuthStore().UserGet(r)
}
func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
return a.s.AuthStore().UserRevokeRole(r)
}
func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
return a.s.AuthStore().RoleAdd(r) return a.s.AuthStore().RoleAdd(r)
} }
func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
return a.s.AuthStore().RoleGrantPermission(r)
}
func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
return a.s.AuthStore().RoleGet(r)
}
func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
return a.s.AuthStore().RoleRevokePermission(r)
}
func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
return a.s.AuthStore().RoleDelete(r)
}
func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
return a.s.AuthStore().UserList(r)
}
func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
return a.s.AuthStore().RoleList(r)
}
type quotaApplierV3 struct { type quotaApplierV3 struct {
applierV3 applierV3
q Quota q Quota
@ -532,7 +652,7 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes
return resp, err return resp, err
} }
type kvSort struct{ kvs []storagepb.KeyValue } type kvSort struct{ kvs []mvccpb.KeyValue }
func (s *kvSort) Swap(i, j int) { func (s *kvSort) Swap(i, j int) {
t := s.kvs[i] t := s.kvs[i]
@ -571,9 +691,9 @@ func (s *kvSortByValue) Less(i, j int) bool {
return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
} }
func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestUnion) error { func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error {
for _, requ := range reqs { for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestPut) tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
if !ok { if !ok {
continue continue
} }
@ -588,9 +708,9 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestUnion) error {
return nil return nil
} }
func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestUnion) error { func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
for _, requ := range reqs { for _, requ := range reqs {
tv, ok := requ.Request.(*pb.RequestUnion_RequestRange) tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
if !ok { if !ok {
continue continue
} }
@ -600,10 +720,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestUnion) error {
} }
if greq.Revision > a.s.KV().Rev() { if greq.Revision > a.s.KV().Rev() {
return dstorage.ErrFutureRev return mvcc.ErrFutureRev
} }
if greq.Revision < a.s.KV().FirstRev() { if greq.Revision < a.s.KV().FirstRev() {
return dstorage.ErrCompacted return mvcc.ErrCompacted
} }
} }
return nil return nil

163
vendor/github.com/coreos/etcd/etcdserver/apply_auth.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"sync"
"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type authApplierV3 struct {
applierV3
as auth.AuthStore
// mu serializes Apply so that user isn't corrupted and so that
// serialized requests don't leak data from TOCTOU errors
mu sync.Mutex
user string
}
func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
return &authApplierV3{applierV3: base, as: as}
}
func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
aa.mu.Lock()
defer aa.mu.Unlock()
if r.Header != nil {
// backward-compatible with pre-3.0 releases when internalRaftRequest
// does not have header field
aa.user = r.Header.Username
}
if needAdminPermission(r) && !aa.as.IsAdminPermitted(aa.user) {
aa.user = ""
return &applyResult{err: auth.ErrPermissionDenied}
}
ret := aa.applierV3.Apply(r)
aa.user = ""
return ret
}
func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) {
if !aa.as.IsPutPermitted(aa.user, r.Key) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Put(txnID, r)
}
func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if !aa.as.IsRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Range(txnID, r)
}
func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
if !aa.as.IsDeleteRangePermitted(aa.user, r.Key, r.RangeEnd) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.DeleteRange(txnID, r)
}
func (aa *authApplierV3) checkTxnReqsPermission(reqs []*pb.RequestOp) bool {
for _, requ := range reqs {
switch tv := requ.Request.(type) {
case *pb.RequestOp_RequestRange:
if tv.RequestRange == nil {
continue
}
if !aa.as.IsRangePermitted(aa.user, tv.RequestRange.Key, tv.RequestRange.RangeEnd) {
return false
}
case *pb.RequestOp_RequestPut:
if tv.RequestPut == nil {
continue
}
if !aa.as.IsPutPermitted(aa.user, tv.RequestPut.Key) {
return false
}
case *pb.RequestOp_RequestDeleteRange:
if tv.RequestDeleteRange == nil {
continue
}
if !aa.as.IsDeleteRangePermitted(aa.user, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) {
return false
}
}
}
return true
}
func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
for _, c := range rt.Compare {
if !aa.as.IsRangePermitted(aa.user, c.Key, nil) {
return nil, auth.ErrPermissionDenied
}
}
if !aa.checkTxnReqsPermission(rt.Success) {
return nil, auth.ErrPermissionDenied
}
if !aa.checkTxnReqsPermission(rt.Failure) {
return nil, auth.ErrPermissionDenied
}
return aa.applierV3.Txn(rt)
}
func needAdminPermission(r *pb.InternalRaftRequest) bool {
switch {
case r.AuthEnable != nil:
return true
case r.AuthDisable != nil:
return true
case r.AuthUserAdd != nil:
return true
case r.AuthUserDelete != nil:
return true
case r.AuthUserChangePassword != nil:
return true
case r.AuthUserGrantRole != nil:
return true
case r.AuthUserGet != nil:
return true
case r.AuthUserRevokeRole != nil:
return true
case r.AuthRoleAdd != nil:
return true
case r.AuthRoleGrantPermission != nil:
return true
case r.AuthRoleGet != nil:
return true
case r.AuthRoleRevokePermission != nil:
return true
case r.AuthRoleDelete != nil:
return true
case r.AuthUserList != nil:
return true
case r.AuthRoleList != nil:
return true
default:
return false
}
}

140
vendor/github.com/coreos/etcd/etcdserver/apply_v2.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"encoding/json"
"path"
"time"
"github.com/coreos/etcd/etcdserver/api"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/store"
"github.com/coreos/go-semver/semver"
)
// ApplierV2 is the interface for processing V2 raft messages
type ApplierV2 interface {
Delete(r *pb.Request) Response
Post(r *pb.Request) Response
Put(r *pb.Request) Response
QGet(r *pb.Request) Response
Sync(r *pb.Request) Response
}
func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
return &applierV2store{store: s, cluster: c}
}
type applierV2store struct {
store store.Store
cluster *membership.RaftCluster
}
func (a *applierV2store) Delete(r *pb.Request) Response {
switch {
case r.PrevIndex > 0 || r.PrevValue != "":
return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
default:
return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
}
}
func (a *applierV2store) Post(r *pb.Request) Response {
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
}
func (a *applierV2store) Put(r *pb.Request) Response {
ttlOptions := toTTLOptions(r)
exists, existsSet := pbutil.GetBool(r.PrevExist)
switch {
case existsSet:
if exists {
if r.PrevIndex == 0 && r.PrevValue == "" {
return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
}
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
}
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
case r.PrevIndex > 0 || r.PrevValue != "":
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
default:
if storeMemberAttributeRegexp.MatchString(r.Path) {
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
var attr membership.Attributes
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
}
if a.cluster != nil {
a.cluster.UpdateAttributes(id, attr)
}
// return an empty response since there is no consumer.
return Response{}
}
if r.Path == membership.StoreClusterVersionKey() {
if a.cluster != nil {
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
}
// return an empty response since there is no consumer.
return Response{}
}
return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
}
}
func (a *applierV2store) QGet(r *pb.Request) Response {
return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
}
func (a *applierV2store) Sync(r *pb.Request) Response {
a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
return Response{}
}
// applyV2Request interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
toTTLOptions(r)
switch r.Method {
case "POST":
return s.applyV2.Post(r)
case "PUT":
return s.applyV2.Put(r)
case "DELETE":
return s.applyV2.Delete(r)
case "QGET":
return s.applyV2.QGet(r)
case "SYNC":
return s.applyV2.Sync(r)
default:
// This should never be reached, but just in case:
return Response{err: ErrUnknownMethod}
}
}
func toTTLOptions(r *pb.Request) store.TTLOptionSet {
refresh, _ := pbutil.GetBool(r.Refresh)
ttlOptions := store.TTLOptionSet{Refresh: refresh}
if r.Expiration != 0 {
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
}
return ttlOptions
}
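toTTLOptions above treats r.Expiration as an absolute expiry time in Unix nanoseconds, so time.Unix(0, nanos) reconstructs it; a quick self-contained check of that round trip (values are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	expiry := time.Now().Add(30 * time.Second)
	nanos := expiry.UnixNano()       // how an expiration would be carried in the request
	recovered := time.Unix(0, nanos) // the conversion used by toTTLOptions
	fmt.Println(recovered.Equal(expiry)) // true: nanosecond precision is preserved
}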
func toResponse(ev *store.Event, err error) Response {
return Response{Event: ev, err: err}
}

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -281,13 +281,7 @@ func (s *store) UpdateUser(user User) (User, error) {
return old, err return old, err
} }
hash, err := s.HashPassword(user.Password) newUser, err := old.merge(user, s.PasswordStore)
if err != nil {
return old, err
}
user.Password = hash
newUser, err := old.merge(user)
if err != nil { if err != nil {
return old, err return old, err
} }
@ -335,13 +329,9 @@ func (s *store) GetRole(name string) (Role, error) {
} }
var r Role var r Role
err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r) err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
if err != nil {
return r, err return r, err
} }
return r, nil
}
func (s *store) CreateRole(role Role) error { func (s *store) CreateRole(role Role) error {
if role.Role == RootRoleName { if role.Role == RootRoleName {
return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
@ -452,29 +442,33 @@ func (s *store) DisableAuth() error {
// is called and returns a new User with these modifications applied. Think of // is called and returns a new User with these modifications applied. Think of
// all Users as immutable sets of data. Merge allows you to perform the set // all Users as immutable sets of data. Merge allows you to perform the set
// operations (desired grants and revokes) atomically // operations (desired grants and revokes) atomically
func (u User) merge(n User) (User, error) { func (ou User) merge(nu User, s PasswordStore) (User, error) {
var out User var out User
if u.User != n.User { if ou.User != nu.User {
return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", u.User, n.User) return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
} }
out.User = u.User out.User = ou.User
if n.Password != "" { if nu.Password != "" {
out.Password = n.Password hash, err := s.HashPassword(nu.Password)
if err != nil {
return ou, err
}
out.Password = hash
} else { } else {
out.Password = u.Password out.Password = ou.Password
} }
currentRoles := types.NewUnsafeSet(u.Roles...) currentRoles := types.NewUnsafeSet(ou.Roles...)
for _, g := range n.Grant { for _, g := range nu.Grant {
if currentRoles.Contains(g) { if currentRoles.Contains(g) {
plog.Noticef("granting duplicate role %s for user %s", g, n.User) plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, n.User)) return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
} }
currentRoles.Add(g) currentRoles.Add(g)
} }
for _, r := range n.Revoke { for _, r := range nu.Revoke {
if !currentRoles.Contains(r) { if !currentRoles.Contains(r) {
plog.Noticef("revoking ungranted role %s for user %s", r, n.User) plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, n.User)) return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
} }
currentRoles.Remove(r) currentRoles.Remove(r)
} }
@ -497,11 +491,8 @@ func (r Role) merge(n Role) (Role, error) {
return out, err return out, err
} }
out.Permissions, err = out.Permissions.Revoke(n.Revoke) out.Permissions, err = out.Permissions.Revoke(n.Revoke)
if err != nil {
return out, err return out, err
} }
return out, nil
}
func (r Role) HasKeyAccess(key string, write bool) bool { func (r Role) HasKeyAccess(key string, write bool) bool {
if r.Role == RootRoleName { if r.Role == RootRoleName {

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -73,6 +73,7 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
continue continue
} }
b, err := ioutil.ReadAll(resp.Body) b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil { if err != nil {
if logerr { if logerr {
plog.Warningf("could not read the body of cluster response: %v", err) plog.Warningf("could not read the body of cluster response: %v", err)

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -56,6 +56,9 @@ type ServerConfig struct {
StrictReconfigCheck bool StrictReconfigCheck bool
EnablePprof bool EnablePprof bool
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
ClientCertAuthEnabled bool
} }
// VerifyBootstrap sanity-checks the initial config for bootstrap case // VerifyBootstrap sanity-checks the initial config for bootstrap case

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -14,12 +14,20 @@
package etcdserver package etcdserver
import (
"sync/atomic"
)
// consistentIndex represents the offset of an entry in a consistent replica log. // consistentIndex represents the offset of an entry in a consistent replica log.
// It implements the storage.ConsistentIndexGetter interface. // It implements the mvcc.ConsistentIndexGetter interface.
// It is always set to the offset of current entry before executing the entry, // It is always set to the offset of current entry before executing the entry,
// so ConsistentWatchableKV could get the consistent index from it. // so ConsistentWatchableKV could get the consistent index from it.
type consistentIndex uint64 type consistentIndex uint64
func (i *consistentIndex) setConsistentIndex(v uint64) { *i = consistentIndex(v) } func (i *consistentIndex) setConsistentIndex(v uint64) {
atomic.StoreUint64((*uint64)(i), v)
}
func (i *consistentIndex) ConsistentIndex() uint64 { return uint64(*i) } func (i *consistentIndex) ConsistentIndex() uint64 {
return atomic.LoadUint64((*uint64)(i))
}
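The change above turns consistentIndex's accessors into atomic loads and stores; the same pattern on a defined uint64 type, as a runnable sketch (names are hypothetical):

package main

import (
	"fmt"
	"sync/atomic"
)

// counter is a defined uint64 whose accessors go through sync/atomic, so
// concurrent readers and writers never see a torn value.
type counter uint64

func (c *counter) set(v uint64) { atomic.StoreUint64((*uint64)(c), v) }
func (c *counter) get() uint64  { return atomic.LoadUint64((*uint64)(c)) }

func main() {
	var c counter
	c.set(7)
	fmt.Println(c.get()) // 7
}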

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -30,6 +30,7 @@ var (
ErrNoLeader = errors.New("etcdserver: no leader") ErrNoLeader = errors.New("etcdserver: no leader")
ErrRequestTooLarge = errors.New("etcdserver: request is too large") ErrRequestTooLarge = errors.New("etcdserver: request is too large")
ErrNoSpace = errors.New("etcdserver: no space") ErrNoSpace = errors.New("etcdserver: no space")
ErrInvalidAuthToken = errors.New("etcdserver: invalid auth token")
) )
type DiscoveryError struct { type DiscoveryError struct {

View File

@ -13,8 +13,10 @@
It has these top-level messages: It has these top-level messages:
Request Request
Metadata Metadata
RequestHeader
InternalRaftRequest InternalRaftRequest
EmptyResponse EmptyResponse
InternalAuthenticateRequest
ResponseHeader ResponseHeader
RangeRequest RangeRequest
RangeResponse RangeResponse
@ -22,8 +24,8 @@
PutResponse PutResponse
DeleteRangeRequest DeleteRangeRequest
DeleteRangeResponse DeleteRangeResponse
RequestUnion RequestOp
ResponseUnion ResponseOp
Compare Compare
TxnRequest TxnRequest
TxnResponse TxnResponse
@ -31,6 +33,8 @@
CompactionResponse CompactionResponse
HashRequest HashRequest
HashResponse HashResponse
SnapshotRequest
SnapshotResponse
WatchRequest WatchRequest
WatchCreateRequest WatchCreateRequest
WatchCancelRequest WatchCancelRequest
@ -64,13 +68,15 @@
AuthUserGetRequest AuthUserGetRequest
AuthUserDeleteRequest AuthUserDeleteRequest
AuthUserChangePasswordRequest AuthUserChangePasswordRequest
AuthUserGrantRequest AuthUserGrantRoleRequest
AuthUserRevokeRequest AuthUserRevokeRoleRequest
AuthRoleAddRequest AuthRoleAddRequest
AuthRoleGetRequest AuthRoleGetRequest
AuthUserListRequest
AuthRoleListRequest
AuthRoleDeleteRequest AuthRoleDeleteRequest
AuthRoleGrantRequest AuthRoleGrantPermissionRequest
AuthRoleRevokeRequest AuthRoleRevokePermissionRequest
AuthEnableResponse AuthEnableResponse
AuthDisableResponse AuthDisableResponse
AuthenticateResponse AuthenticateResponse
@ -78,20 +84,22 @@
AuthUserGetResponse AuthUserGetResponse
AuthUserDeleteResponse AuthUserDeleteResponse
AuthUserChangePasswordResponse AuthUserChangePasswordResponse
AuthUserGrantResponse AuthUserGrantRoleResponse
AuthUserRevokeResponse AuthUserRevokeRoleResponse
AuthRoleAddResponse AuthRoleAddResponse
AuthRoleGetResponse AuthRoleGetResponse
AuthRoleListResponse
AuthUserListResponse
AuthRoleDeleteResponse AuthRoleDeleteResponse
AuthRoleGrantResponse AuthRoleGrantPermissionResponse
AuthRoleRevokeResponse AuthRoleRevokePermissionResponse
*/ */
package etcdserverpb package etcdserverpb
import ( import (
"fmt" "fmt"
proto "github.com/gogo/protobuf/proto" proto "github.com/golang/protobuf/proto"
math "math" math "math"
) )
@ -103,40 +111,46 @@ var _ = proto.Marshal
var _ = fmt.Errorf var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Request struct { type Request struct {
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD" json:"ID"`
Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` Method string `protobuf:"bytes,2,opt,name=Method,json=method" json:"Method"`
Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` Path string `protobuf:"bytes,3,opt,name=Path,json=path" json:"Path"`
Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` Val string `protobuf:"bytes,4,opt,name=Val,json=val" json:"Val"`
Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` Dir bool `protobuf:"varint,5,opt,name=Dir,json=dir" json:"Dir"`
PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` PrevValue string `protobuf:"bytes,6,opt,name=PrevValue,json=prevValue" json:"PrevValue"`
PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex,json=prevIndex" json:"PrevIndex"`
PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist,json=prevExist" json:"PrevExist,omitempty"`
Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` Expiration int64 `protobuf:"varint,9,opt,name=Expiration,json=expiration" json:"Expiration"`
Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` Wait bool `protobuf:"varint,10,opt,name=Wait,json=wait" json:"Wait"`
Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` Since uint64 `protobuf:"varint,11,opt,name=Since,json=since" json:"Since"`
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` Recursive bool `protobuf:"varint,12,opt,name=Recursive,json=recursive" json:"Recursive"`
Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` Sorted bool `protobuf:"varint,13,opt,name=Sorted,json=sorted" json:"Sorted"`
Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` Quorum bool `protobuf:"varint,14,opt,name=Quorum,json=quorum" json:"Quorum"`
Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` Time int64 `protobuf:"varint,15,opt,name=Time,json=time" json:"Time"`
Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` Stream bool `protobuf:"varint,16,opt,name=Stream,json=stream" json:"Stream"`
Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` Refresh *bool `protobuf:"varint,17,opt,name=Refresh,json=refresh" json:"Refresh,omitempty"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
func (m *Request) Reset() { *m = Request{} } func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) } func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {} func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
type Metadata struct { type Metadata struct {
NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` NodeID uint64 `protobuf:"varint,1,opt,name=NodeID,json=nodeID" json:"NodeID"`
ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID,json=clusterID" json:"ClusterID"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) } func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {} func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} }
func init() { func init() {
proto.RegisterType((*Request)(nil), "etcdserverpb.Request") proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
@ -995,3 +1009,33 @@ var (
ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
) )
var fileDescriptorEtcdserver = []byte{
// 404 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31,
0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1,
0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5,
0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45,
0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb,
0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4,
0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35,
0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd,
0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7,
0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4,
0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c,
0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08,
0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27,
0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21,
0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91,
0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5,
0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd,
0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32,
0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56,
0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe,
0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d,
0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6,
0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f,
0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f,
0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40,
0xa4, 0x02, 0x00, 0x00,
}

File diff suppressed because it is too large

View File

@ -10,10 +10,18 @@ option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true; option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false; option (gogoproto.goproto_getters_all) = false;
message RequestHeader {
uint64 ID = 1;
// username is a username that is associated with an auth token of gRPC connection
string username = 2;
}
// An InternalRaftRequest is the union of all requests which can be // An InternalRaftRequest is the union of all requests which can be
// sent via raft. // sent via raft.
message InternalRaftRequest { message InternalRaftRequest {
RequestHeader header = 100;
uint64 ID = 1; uint64 ID = 1;
Request v2 = 2; Request v2 = 2;
RangeRequest range = 3; RangeRequest range = 3;
@ -25,14 +33,40 @@ message InternalRaftRequest {
LeaseGrantRequest lease_grant = 8; LeaseGrantRequest lease_grant = 8;
LeaseRevokeRequest lease_revoke = 9; LeaseRevokeRequest lease_revoke = 9;
AuthEnableRequest auth_enable = 10; AlarmRequest alarm = 10;
AuthUserAddRequest auth_user_add = 11;
AuthUserDeleteRequest auth_user_delete = 12;
AuthUserChangePasswordRequest auth_user_change_password = 13;
AuthRoleAddRequest auth_role_add = 14;
AlarmRequest alarm = 15; AuthEnableRequest auth_enable = 1000;
AuthDisableRequest auth_disable = 1011;
InternalAuthenticateRequest authenticate = 1012;
AuthUserAddRequest auth_user_add = 1100;
AuthUserDeleteRequest auth_user_delete = 1101;
AuthUserGetRequest auth_user_get = 1102;
AuthUserChangePasswordRequest auth_user_change_password = 1103;
AuthUserGrantRoleRequest auth_user_grant_role = 1104;
AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
AuthUserListRequest auth_user_list = 1106;
AuthRoleListRequest auth_role_list = 1107;
AuthRoleAddRequest auth_role_add = 1200;
AuthRoleDeleteRequest auth_role_delete = 1201;
AuthRoleGetRequest auth_role_get = 1202;
AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
} }
message EmptyResponse { message EmptyResponse {
} }
// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
// To avoid misuse of the field, we have an internal version of AuthenticateRequest.
message InternalAuthenticateRequest {
string name = 1;
string password = 2;
// simple_token is generated in API layer (etcdserver/v3_server.go)
string simple_token = 3;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -2,56 +2,107 @@ syntax = "proto3";
package etcdserverpb; package etcdserverpb;
import "gogoproto/gogo.proto"; import "gogoproto/gogo.proto";
import "etcd/storage/storagepb/kv.proto"; import "etcd/mvcc/mvccpb/kv.proto";
import "etcd/auth/authpb/auth.proto";
// for grpc-gateway
import "google/api/annotations.proto";
option (gogoproto.marshaler_all) = true; option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true; option (gogoproto.unmarshaler_all) = true;
service KV { service KV {
// Range gets the keys in the range from the store. // Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse) {} rpc Range(RangeRequest) returns (RangeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/range"
body: "*"
};
}
// Put puts the given key into the store. // Put puts the given key into the key-value store.
// A put request increases the revision of the store, // A put request increments the revision of the key-value store
// and generates one event in the event history. // and generates one event in the event history.
rpc Put(PutRequest) returns (PutResponse) {} rpc Put(PutRequest) returns (PutResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/put"
body: "*"
};
}
// Delete deletes the given range from the store. // DeleteRange deletes the given range from the key-value store.
// A delete request increase the revision of the store, // A delete request increments the revision of the key-value store
// and generates one event in the event history. // and generates a delete event in the event history for every deleted key.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {} rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/deleterange"
body: "*"
};
}
// Txn processes all the requests in one transaction. // Txn processes multiple requests in a single transaction.
// A txn request increases the revision of the store, // A txn request increments the revision of the key-value store
// and generates events with the same revision in the event history. // and generates events with the same revision for every completed request.
// It is not allowed to modify the same key several times within one txn. // It is not allowed to modify the same key several times within one txn.
rpc Txn(TxnRequest) returns (TxnResponse) {} rpc Txn(TxnRequest) returns (TxnResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/txn"
body: "*"
};
}
// Compact compacts the event history in etcd. User should compact the // Compact compacts the event history in the etcd key-value store. The key-value
// event history periodically, or it will grow infinitely. // store should be periodically compacted or the event history will continue to grow
rpc Compact(CompactionRequest) returns (CompactionResponse) {} // indefinitely.
rpc Compact(CompactionRequest) returns (CompactionResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/compaction"
body: "*"
};
}
} }
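Each option (google.api.http) block above maps the RPC onto an HTTP POST served by the gRPC gateway. A hedged sketch of calling the Range endpoint from Go, assuming a local etcd listening on the default client port; the JSON shape follows the RangeRequest fields, and grpc-gateway encodes proto bytes fields as base64:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// "foo" is the key we range over; bytes fields travel as base64 in the JSON body.
	key := base64.StdEncoding.EncodeToString([]byte("foo"))
	body := bytes.NewBufferString(fmt.Sprintf(`{"key": %q}`, key))

	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/kv/range", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}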
service Watch { service Watch {
// Watch watches the events happening or happened. Both input and output // Watch watches for events happening or that have happened. Both input and output
// are stream. One watch rpc can watch for multiple keys or prefixs and // are streams; the input stream is for creating and canceling watchers and the output
// get a stream of events. The whole events history can be watched unless // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
// compacted. // for several watches at once. The entire event history can be watched starting from the
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {} // last compaction revision.
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
option (google.api.http) = {
post: "/v3alpha/watch"
body: "*"
};
}
} }
service Lease { service Lease {
// LeaseGrant creates a lease. A lease has a TTL. The lease will expire if the // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
// server does not receive a keepAlive within TTL from the lease holder. // within a given time to live period. All keys attached to the lease will be expired and
// All keys attached to the lease will be expired and deleted if the lease expires. // deleted if the lease expires. Each expired key generates a delete event in the event history.
// The key expiration generates an event in event history. rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {} option (google.api.http) = {
post: "/v3alpha/lease/grant"
body: "*"
};
}
// LeaseRevoke revokes a lease. All the key attached to the lease will be expired and deleted. // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {} rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
option (google.api.http) = {
post: "/v3alpha/kv/lease/revoke"
body: "*"
};
}
// KeepAlive keeps the lease alive. // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {} // to the server and streaming keep alive responses from the server to the client.
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
option (google.api.http) = {
post: "/v3alpha/lease/keepalive"
body: "*"
};
}
// TODO(xiangli) List all existing Leases? // TODO(xiangli) List all existing Leases?
// TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease? // TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease?
@ -59,83 +110,220 @@ service Lease {
service Cluster { service Cluster {
// MemberAdd adds a member into the cluster. // MemberAdd adds a member into the cluster.
rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {} rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/add"
body: "*"
};
}
// MemberRemove removes an existing member from the cluster. // MemberRemove removes an existing member from the cluster.
rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {} rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/remove"
body: "*"
};
}
// MemberUpdate updates the member configuration. // MemberUpdate updates the member configuration.
rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {} rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/update"
body: "*"
};
}
// MemberList lists all the members in the cluster. // MemberList lists all the members in the cluster.
rpc MemberList(MemberListRequest) returns (MemberListResponse) {} rpc MemberList(MemberListRequest) returns (MemberListResponse) {
option (google.api.http) = {
post: "/v3alpha/cluster/member/list"
body: "*"
};
}
} }
service Maintenance { service Maintenance {
// Alarm activates, deactivates, and queries alarms regarding cluster health. // Alarm activates, deactivates, and queries alarms regarding cluster health.
rpc Alarm(AlarmRequest) returns (AlarmResponse) {} rpc Alarm(AlarmRequest) returns (AlarmResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/alarm"
body: "*"
};
}
// Status gets the status of the member. // Status gets the status of the member.
rpc Status(StatusRequest) returns (StatusResponse) {} rpc Status(StatusRequest) returns (StatusResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/status"
body: "*"
};
}
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {} // Defragment defragments a member's backend database to recover storage space.
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/defragment"
body: "*"
};
}
// Hash returns the hash of the local KV state for consistency checking purpose. // Hash returns the hash of the local KV state for consistency checking purpose.
// This is designed for testing; do not use this in production when there // This is designed for testing; do not use this in production when there
// are ongoing transactions. // are ongoing transactions.
rpc Hash(HashRequest) returns (HashResponse) {} rpc Hash(HashRequest) returns (HashResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/hash"
body: "*"
};
}
// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
option (google.api.http) = {
post: "/v3alpha/maintenance/snapshot"
body: "*"
};
}
} }
service Auth { service Auth {
// AuthEnable enables authentication. // AuthEnable enables authentication.
rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {} rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/enable"
body: "*"
};
}
// AuthDisable disables authentication. // AuthDisable disables authentication.
rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {} rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/disable"
body: "*"
};
}
// Authenticate processes authenticate request. // Authenticate processes an authenticate request.
rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {} rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/authenticate"
body: "*"
};
}
// UserAdd adds a new user. // UserAdd adds a new user.
rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {} rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/add"
body: "*"
};
}
// UserGet gets a detailed information of a user or lists entire users. // UserGet gets detailed user information.
rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {} rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/get"
body: "*"
};
}
// UserList gets a list of all users.
rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/list"
body: "*"
};
}
// UserDelete deletes a specified user. // UserDelete deletes a specified user.
rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {} rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/delete"
body: "*"
};
}
// UserChangePassword changes password of a specified user. // UserChangePassword changes the password of a specified user.
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {} rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/changepw"
body: "*"
};
}
// UserGrant grants a role to a specified user. // UserGrant grants a role to a specified user.
rpc UserGrant(AuthUserGrantRequest) returns (AuthUserGrantResponse) {} rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/grant"
body: "*"
};
}
// UserRevoke revokes a role of specified user. // UserRevokeRole revokes a role of specified user.
rpc UserRevoke(AuthUserRevokeRequest) returns (AuthUserRevokeResponse) {} rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/user/revoke"
body: "*"
};
}
// RoleAdd adds a new role. // RoleAdd adds a new role.
rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {} rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/add"
body: "*"
};
}
// RoleGet gets a detailed information of a role or lists entire roles. // RoleGet gets detailed role information.
rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {} rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/get"
body: "*"
};
}
// RoleList gets lists of all roles.
rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/list"
body: "*"
};
}
// RoleDelete deletes a specified role. // RoleDelete deletes a specified role.
rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {} rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/delete"
body: "*"
};
}
// RoleGrant grants a permission of a specified key or range to a specified role. // RoleGrantPermission grants a permission of a specified key or range to a specified role.
rpc RoleGrant(AuthRoleGrantRequest) returns (AuthRoleGrantResponse) {} rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/grant"
body: "*"
};
}
// RoleRevoke revokes a key or range permission of a specified role. // RoleRevokePermission revokes a key or range permission of a specified role.
rpc RoleRevoke(AuthRoleRevokeRequest) returns (AuthRoleRevokeResponse) {} rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
option (google.api.http) = {
post: "/v3alpha/auth/role/revoke"
body: "*"
};
}
} }
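For reference, a minimal sketch of the usual bootstrap against this Auth service via the vendored clientv3 package; the endpoint, user name, role name, and password are illustrative, and the clientv3 helper names are assumed to map onto these RPCs.

package main

import (
    "log"
    "time"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func main() {
    // Connect to a local member; the endpoint is an assumption.
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"localhost:2379"},
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // UserAdd, RoleAdd, UserGrantRole, then AuthEnable: the order matters
    // because enabling auth requires a root user holding the root role.
    if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
        log.Fatal(err)
    }
    if _, err := cli.RoleAdd(ctx, "root"); err != nil {
        log.Fatal(err)
    }
    if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
        log.Fatal(err)
    }
    if _, err := cli.AuthEnable(ctx); err != nil {
        log.Fatal(err)
    }
}

Later sketches below reuse a client constructed the same way.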
message ResponseHeader { message ResponseHeader {
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 1; uint64 cluster_id = 1;
// member_id is the ID of the member which sent the response.
uint64 member_id = 2; uint64 member_id = 2;
// revision of the store when the request was applied. // revision is the key-value store revision when the request was applied.
int64 revision = 3; int64 revision = 3;
// term of raft when the request was applied. // raft_term is the raft term when the request was applied.
uint64 raft_term = 4; uint64 raft_term = 4;
} }
@ -153,43 +341,60 @@ message RangeRequest {
VALUE = 4; VALUE = 4;
} }
// if the range_end is not given, the request returns the key. // key is the first key for the range. If range_end is not given, the request only looks up key.
bytes key = 1; bytes key = 1;
// if the range_end is given, it gets the keys in range [key, range_end) // range_end is the upper bound on the requested range [key, range_end).
// if range_end is nonempty, otherwise it returns all keys >= key. // If range_end is '\0', the range is all keys >= key.
// If the range_end is one bit larger than the given key,
// then the range requests get the all keys with the prefix (the given key).
// If both key and range_end are '\0', then range requests returns all keys.
bytes range_end = 2; bytes range_end = 2;
// limit the number of keys returned. // limit is a limit on the number of keys returned for the request.
int64 limit = 3; int64 limit = 3;
// range over the store at the given revision. // revision is the point-in-time of the key-value store to use for the range.
// if revision is less or equal to zero, range over the newest store. // If revision is less or equal to zero, the range is over the newest key-value store.
// if the revision has been compacted, ErrCompaction will be returned in // If the revision has been compacted, ErrCompacted is returned as a response.
// response.
int64 revision = 4; int64 revision = 4;
// sort_order is the requested order for returned the results // sort_order is the order for returned sorted results.
SortOrder sort_order = 5; SortOrder sort_order = 5;
// sort_target is the kv field to use for sorting // sort_target is the key-value field to use for sorting.
SortTarget sort_target = 6; SortTarget sort_target = 6;
// range request is linearizable by default. Linearizable requests has a higher // serializable sets the range request to use serializable member-local reads.
// latency and lower throughput than serializable request. // Range requests are linearizable by default; linearizable requests have higher
// To reduce latency, serializable can be set. If serializable is set, range request // latency and lower throughput than serializable requests but reflect the current
// will be serializable, but not linearizable with other requests. // consensus of the cluster. For better performance, in exchange for possible stale reads,
// Serializable range can be served locally without waiting for other nodes in the cluster. // a serializable range request is served locally without needing to reach consensus
// with other nodes in the cluster.
bool serializable = 7; bool serializable = 7;
// keys_only when set returns only the keys and not the values.
bool keys_only = 8;
// count_only when set returns only the count of the keys in the range.
bool count_only = 9;
} }
message RangeResponse { message RangeResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
repeated storagepb.KeyValue kvs = 2; // kvs is the list of key-value pairs matched by the range request.
// kvs is empty when count is requested.
repeated mvccpb.KeyValue kvs = 2;
// more indicates if there are more keys to return in the requested range. // more indicates if there are more keys to return in the requested range.
bool more = 3; bool more = 3;
// count is set to the number of keys within the range when requested.
int64 count = 4;
} }
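A sketch of a Range call through clientv3, assuming a client built as in the sketch after the Auth service above; the "job/" prefix and limit are illustrative. WithPrefix derives range_end from the key, WithLimit maps to the limit field, and WithSerializable asks for a member-local read instead of a linearizable one.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func listJobs(ctx context.Context, cli *clientv3.Client) error {
    resp, err := cli.Get(ctx, "job/",
        clientv3.WithPrefix(),
        clientv3.WithLimit(10),
        clientv3.WithSerializable())
    if err != nil {
        return err
    }
    for _, kv := range resp.Kvs {
        fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
    }
    // more reports whether the limit truncated the result.
    fmt.Println("truncated:", resp.More)
    return nil
}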
message PutRequest { message PutRequest {
// key is the key, in bytes, to put into the key-value store.
bytes key = 1; bytes key = 1;
// value is the value, in bytes, to associate with the key in the key-value store.
bytes value = 2; bytes value = 2;
// lease is the lease ID to associate with the key in the key-value store. A lease
// value of 0 indicates no lease.
int64 lease = 3; int64 lease = 3;
} }
@ -198,19 +403,22 @@ message PutResponse {
} }
message DeleteRangeRequest { message DeleteRangeRequest {
// if the range_end is not given, the request deletes the key. // key is the first key to delete in the range.
bytes key = 1; bytes key = 1;
// if the range_end is given, it deletes the keys in range [key, range_end). // range_end is the key following the last key to delete for the range [key, range_end).
// If range_end is not given, the range is defined to contain only the key argument.
// If range_end is '\0', the range is all keys greater than or equal to the key argument.
bytes range_end = 2; bytes range_end = 2;
} }
message DeleteRangeResponse { message DeleteRangeResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// Deleted is the number of keys that got deleted. // deleted is the number of keys deleted by the delete range request.
int64 deleted = 2; int64 deleted = 2;
} }
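In the same vein, a short sketch of Put and DeleteRange; a Put without WithLease leaves the lease field at 0, and deleting with WithPrefix covers the [key, range_end) form described above. Key names are illustrative.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func putThenDeletePrefix(ctx context.Context, cli *clientv3.Client) error {
    if _, err := cli.Put(ctx, "job/1", "pending"); err != nil {
        return err
    }
    dresp, err := cli.Delete(ctx, "job/", clientv3.WithPrefix())
    if err != nil {
        return err
    }
    fmt.Println("deleted keys:", dresp.Deleted) // maps to the deleted field
    return nil
}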
message RequestUnion { message RequestOp {
// request is a union of request types accepted by a transaction.
oneof request { oneof request {
RangeRequest request_range = 1; RangeRequest request_range = 1;
PutRequest request_put = 2; PutRequest request_put = 2;
@ -218,7 +426,8 @@ message RequestUnion {
} }
} }
message ResponseUnion { message ResponseOp {
// response is a union of response types returned by a transaction.
oneof response { oneof response {
RangeResponse response_range = 1; RangeResponse response_range = 1;
PutResponse response_put = 2; PutResponse response_put = 2;
@ -238,27 +447,24 @@ message Compare {
MOD = 2; MOD = 2;
VALUE= 3; VALUE= 3;
} }
// result is logical comparison operation for this comparison.
CompareResult result = 1; CompareResult result = 1;
// target is the key-value field to inspect for the comparison.
CompareTarget target = 2; CompareTarget target = 2;
// key path // key is the subject key for the comparison operation.
bytes key = 3; bytes key = 3;
oneof target_union { oneof target_union {
// version of the given key // version is the version of the given key
int64 version = 4; int64 version = 4;
// create revision of the given key // create_revision is the creation revision of the given key
int64 create_revision = 5; int64 create_revision = 5;
// last modified revision of the given key // mod_revision is the last modified revision of the given key.
int64 mod_revision = 6; int64 mod_revision = 6;
// value of the given key // value is the value of the given key, in bytes.
bytes value = 7; bytes value = 7;
} }
} }
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
// If the comparisons fail, then the failure requests will be processed in order,
// and the response will contain their respective responses in order.
// From google paxosdb paper: // From google paxosdb paper:
// Our implementation hinges around a powerful primitive which we call MultiOp. All other database // Our implementation hinges around a powerful primitive which we call MultiOp. All other database
// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically // operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
@ -275,26 +481,35 @@ message Compare {
// true. // true.
// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. // 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
message TxnRequest { message TxnRequest {
// compare is a list of predicates representing a conjunction of terms.
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
// If the comparisons fail, then the failure requests will be processed in order,
// and the response will contain their respective responses in order.
repeated Compare compare = 1; repeated Compare compare = 1;
repeated RequestUnion success = 2; // success is a list of requests which will be applied when compare evaluates to true.
repeated RequestUnion failure = 3; repeated RequestOp success = 2;
// failure is a list of requests which will be applied when compare evaluates to false.
repeated RequestOp failure = 3;
} }
message TxnResponse { message TxnResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// succeeded is set to true if the compare evaluated to true or false otherwise.
bool succeeded = 2; bool succeeded = 2;
repeated ResponseUnion responses = 3; // responses is a list of responses corresponding to the results from applying
// success if succeeded is true or failure if succeeded is false.
repeated ResponseOp responses = 3;
} }
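A sketch of the MultiOp pattern through the clientv3 transaction builder, assuming a client as above; the key and revision are illustrative. The Compare guard below checks mod_revision, the Then branch is the success list, and the Else branch is the failure list.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

// casPut updates key to val only if the key has not been modified since modRev.
func casPut(ctx context.Context, cli *clientv3.Client, key, val string, modRev int64) error {
    resp, err := cli.Txn(ctx).
        If(clientv3.Compare(clientv3.ModRevision(key), "=", modRev)).
        Then(clientv3.OpPut(key, val)).
        Else(clientv3.OpGet(key)).
        Commit()
    if err != nil {
        return err
    }
    if !resp.Succeeded {
        // Responses holds the failure branch results, here a single Range.
        fmt.Println("guard failed; key changed since revision", modRev)
    }
    return nil
}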
// Compaction compacts the kv store upto the given revision (including). // CompactionRequest compacts the key-value store up to a given revision. All superseded keys
// It removes the old versions of a key. It keeps the newest version of // with a revision less than the compaction revision will be removed.
// the key even if its latest modification revision is smaller than the given
// revision.
message CompactionRequest { message CompactionRequest {
// revision is the key-value store revision for the compaction operation.
int64 revision = 1; int64 revision = 1;
// physical is set so the RPC will wait until the compaction is physically // physical is set so the RPC will wait until the compaction is physically
// applied to the local database such that compacted entries are totally // applied to the local database such that compacted entries are totally
// removed from the backing store. // removed from the backend database.
bool physical = 2; bool physical = 2;
} }
@ -307,10 +522,27 @@ message HashRequest {
message HashResponse { message HashResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// hash is the hash value computed from the responding member's key-value store.
uint32 hash = 2; uint32 hash = 2;
} }
message SnapshotRequest {
}
message SnapshotResponse {
// header has the current key-value store information. The first header in the snapshot
// stream indicates the point in time of the snapshot.
ResponseHeader header = 1;
// remaining_bytes is the number of blob bytes to be sent after this message
uint64 remaining_bytes = 2;
// blob contains the next chunk of the snapshot in the snapshot stream.
bytes blob = 3;
}
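A sketch of consuming the new Snapshot stream through clientv3's Maintenance API, assuming it exposes the stream as a single reader; the output path is illustrative.

package example

import (
    "io"
    "os"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
    // The reader concatenates the blob chunks from the SnapshotResponse stream.
    rd, err := cli.Snapshot(ctx)
    if err != nil {
        return err
    }
    defer rd.Close()

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    _, err = io.Copy(f, rd)
    return err
}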
message WatchRequest { message WatchRequest {
// request_union is a request to either create a new watcher or cancel an existing watcher.
oneof request_union { oneof request_union {
WatchCreateRequest create_request = 1; WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2; WatchCancelRequest cancel_request = 2;
@ -318,65 +550,69 @@ message WatchRequest {
} }
message WatchCreateRequest { message WatchCreateRequest {
// the key to be watched // key is the key to register for watching.
bytes key = 1; bytes key = 1;
// if the range_end is given, keys in [key, range_end) are watched // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
// NOTE: only range_end == prefixEnd(key) is accepted now // only the key argument is watched. If range_end is equal to '\0', all keys greater than
// or equal to the key argument are watched.
bytes range_end = 2; bytes range_end = 2;
// start_revision is an optional revision (including) to watch from. No start_revision is "now". // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
int64 start_revision = 3; int64 start_revision = 3;
// if progress_notify is set, etcd server sends WatchResponse with empty events to the // progress_notify is set so that the etcd server will periodically send a WatchResponse with
// created watcher when there are no recent events. It is useful when clients want always to be // no events to the new watcher if there are no recent events. It is useful when clients
// able to recover a disconnected watcher from a recent known revision. // wish to recover a disconnected watcher starting from a recent known revision.
// etcdsever can decide how long it should send a notification based on current load. // The etcd server may decide how often it will send notifications based on current load.
bool progress_notify = 4; bool progress_notify = 4;
} }
message WatchCancelRequest { message WatchCancelRequest {
// watch_id is the watcher id to cancel so that no more events are transmitted.
int64 watch_id = 1; int64 watch_id = 1;
} }
message WatchResponse { message WatchResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// watch_id is the ID of the watching the response sent to. // watch_id is the ID of the watcher that corresponds to the response.
int64 watch_id = 2; int64 watch_id = 2;
// If the response is for a create watch request, created is set to true. // created is set to true if the response is for a create watch request.
// Client should record the watch_id and prepare for receiving events for // The client should record the watch_id and expect to receive events for
// that watching from the same stream. // the created watcher from the same stream.
// All events sent to the created watching will attach with the same watch_id. // All events sent to the created watcher will attach with the same watch_id.
bool created = 3; bool created = 3;
// If the response is for a cancel watch request, cancel is set to true. // canceled is set to true if the response is for a cancel watch request.
// No further events will be sent to the canceled watching. // No further events will be sent to the canceled watcher.
bool canceled = 4; bool canceled = 4;
// CompactRevision is set to the minimum index if a watching tries to watch // compact_revision is set to the minimum index if a watcher tries to watch
// at a compacted index. // at a compacted index.
// //
// This happens when creating a watching at a compacted revision or the watching cannot // This happens when creating a watcher at a compacted revision or the watcher cannot
// catch up with the progress of the KV. // catch up with the progress of the key-value store.
// //
// Client should treat the watching as canceled and should not try to create any // The client should treat the watcher as canceled and should not try to create any
// watching with same start_revision again. // watcher with the same start_revision again.
int64 compact_revision = 5; int64 compact_revision = 5;
repeated storagepb.Event events = 11; repeated mvccpb.Event events = 11;
} }
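A sketch of a watcher built on these messages via clientv3, assuming a client as above; the prefix and starting revision are illustrative. WithRev maps to start_revision, WithProgressNotify to progress_notify, and a non-zero CompactRevision signals the compaction case described above.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func watchPrefix(ctx context.Context, cli *clientv3.Client, prefix string, fromRev int64) {
    wch := cli.Watch(ctx, prefix,
        clientv3.WithPrefix(),
        clientv3.WithRev(fromRev),
        clientv3.WithProgressNotify())
    for wresp := range wch {
        if wresp.CompactRevision != 0 {
            // The requested revision was compacted; re-create the watcher
            // from a newer revision instead of retrying this one.
            fmt.Println("watch compacted at revision", wresp.CompactRevision)
            return
        }
        for _, ev := range wresp.Events {
            fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
        }
    }
}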
message LeaseGrantRequest { message LeaseGrantRequest {
// advisory ttl in seconds // TTL is the advisory time-to-live in seconds.
int64 TTL = 1; int64 TTL = 1;
// requested ID to create; 0 lets lessor choose // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
int64 ID = 2; int64 ID = 2;
} }
message LeaseGrantResponse { message LeaseGrantResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// ID is the lease ID for the granted lease.
int64 ID = 2; int64 ID = 2;
// server decided ttl in second // TTL is the server chosen lease time-to-live in seconds.
int64 TTL = 3; int64 TTL = 3;
string error = 4; string error = 4;
} }
message LeaseRevokeRequest { message LeaseRevokeRequest {
// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
int64 ID = 1; int64 ID = 1;
} }
@ -385,36 +621,42 @@ message LeaseRevokeResponse {
} }
message LeaseKeepAliveRequest { message LeaseKeepAliveRequest {
// ID is the lease ID for the lease to keep alive.
int64 ID = 1; int64 ID = 1;
} }
message LeaseKeepAliveResponse { message LeaseKeepAliveResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// ID is the lease ID from the keep alive request.
int64 ID = 2; int64 ID = 2;
// TTL is the new time-to-live for the lease.
int64 TTL = 3; int64 TTL = 3;
} }
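A sketch of the lease flow (grant, attach, keep alive) through clientv3, assuming a client as above; the TTL, key, and value are illustrative.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func leasedKey(ctx context.Context, cli *clientv3.Client) error {
    grant, err := cli.Grant(ctx, 10) // advisory TTL of 10 seconds
    if err != nil {
        return err
    }
    // Attach the key to the lease; the key is deleted when the lease expires.
    if _, err := cli.Put(ctx, "svc/instance-1", "10.0.0.1", clientv3.WithLease(grant.ID)); err != nil {
        return err
    }
    ka, err := cli.KeepAlive(ctx, grant.ID)
    if err != nil {
        return err
    }
    // Each keep-alive response carries the server-chosen refreshed TTL.
    for resp := range ka {
        fmt.Println("lease", resp.ID, "refreshed, TTL", resp.TTL)
    }
    return nil
}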
message Member { message Member {
// ID is the member ID for this member.
uint64 ID = 1; uint64 ID = 1;
// If the member is not started, name will be an empty string. // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
string name = 2; string name = 2;
bool IsLeader = 3; // peerURLs is the list of URLs the member exposes to the cluster for communication.
repeated string peerURLs = 4; repeated string peerURLs = 3;
// If the member is not started, client_URLs will be an zero length // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
// string array. repeated string clientURLs = 4;
repeated string clientURLs = 5;
} }
message MemberAddRequest { message MemberAddRequest {
// peerURLs is the list of URLs the added member will use to communicate with the cluster.
repeated string peerURLs = 1; repeated string peerURLs = 1;
} }
message MemberAddResponse { message MemberAddResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// member is the member information for the added member.
Member member = 2; Member member = 2;
} }
message MemberRemoveRequest { message MemberRemoveRequest {
// ID is the member ID of the member to remove.
uint64 ID = 1; uint64 ID = 1;
} }
@ -423,7 +665,9 @@ message MemberRemoveResponse {
} }
message MemberUpdateRequest { message MemberUpdateRequest {
// ID is the member ID of the member to update.
uint64 ID = 1; uint64 ID = 1;
// peerURLs is the new list of URLs the member will use to communicate with the cluster.
repeated string peerURLs = 2; repeated string peerURLs = 2;
} }
@ -436,11 +680,11 @@ message MemberListRequest {
message MemberListResponse { message MemberListResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// members is a list of all members associated with the cluster.
repeated Member members = 2; repeated Member members = 2;
} }
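A sketch of the Cluster RPCs through clientv3, assuming a client as above; the peer URL is illustrative. Note that the Member message no longer carries IsLeader, so leadership is now reported by the Status RPC instead.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func listThenAddMember(ctx context.Context, cli *clientv3.Client) error {
    lresp, err := cli.MemberList(ctx)
    if err != nil {
        return err
    }
    for _, m := range lresp.Members {
        fmt.Println(m.ID, m.Name, m.PeerURLs, m.ClientURLs)
    }
    aresp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
    if err != nil {
        return err
    }
    fmt.Println("added member", aresp.Member.ID)
    return nil
}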
message DefragmentRequest { message DefragmentRequest {
} }
message DefragmentResponse { message DefragmentResponse {
@ -449,7 +693,7 @@ message DefragmentResponse {
enum AlarmType { enum AlarmType {
NONE = 0; // default, used to query if any alarm is active NONE = 0; // default, used to query if any alarm is active
NOSPACE = 1; NOSPACE = 1; // space quota is exhausted
} }
message AlarmRequest { message AlarmRequest {
@ -458,19 +702,27 @@ message AlarmRequest {
ACTIVATE = 1; ACTIVATE = 1;
DEACTIVATE = 2; DEACTIVATE = 2;
} }
// action is the kind of alarm request to issue. The action
// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
// raised alarm.
AlarmAction action = 1; AlarmAction action = 1;
// MemberID is the member raising the alarm request // memberID is the ID of the member associated with the alarm. If memberID is 0, the
// alarm request covers all members.
uint64 memberID = 2; uint64 memberID = 2;
// alarm is the type of alarm to consider for this request.
AlarmType alarm = 3; AlarmType alarm = 3;
} }
message AlarmMember { message AlarmMember {
// memberID is the ID of the member associated with the raised alarm.
uint64 memberID = 1; uint64 memberID = 1;
// alarm is the type of alarm which has been raised.
AlarmType alarm = 2; AlarmType alarm = 2;
} }
message AlarmResponse { message AlarmResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// alarms is a list of alarms associated with the alarm request.
repeated AlarmMember alarms = 2; repeated AlarmMember alarms = 2;
} }
@ -479,7 +731,16 @@ message StatusRequest {
message StatusResponse { message StatusResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// version is the cluster protocol version used by the responding member.
string version = 2; string version = 2;
// dbSize is the size of the backend database, in bytes, of the responding member.
int64 dbSize = 3;
// leader is the member ID which the responding member believes is the current leader.
uint64 leader = 4;
// raftIndex is the current raft index of the responding member.
uint64 raftIndex = 5;
// raftTerm is the current raft term of the responding member.
uint64 raftTerm = 6;
} }
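A sketch of reading the expanded StatusResponse together with AlarmList through clientv3's Maintenance API, assuming a client as above; the endpoint string is illustrative.

package example

import (
    "fmt"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func reportStatus(ctx context.Context, cli *clientv3.Client) error {
    st, err := cli.Status(ctx, "localhost:2379")
    if err != nil {
        return err
    }
    fmt.Printf("version=%s dbSize=%d leader=%x raftIndex=%d raftTerm=%d\n",
        st.Version, st.DbSize, st.Leader, st.RaftIndex, st.RaftTerm)

    al, err := cli.AlarmList(ctx)
    if err != nil {
        return err
    }
    for _, a := range al.Alarms {
        // A NOSPACE alarm here means the space quota is exhausted.
        fmt.Println("alarm", a.Alarm, "on member", a.MemberID)
    }
    return nil
}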
message AuthEnableRequest { message AuthEnableRequest {
@ -489,6 +750,8 @@ message AuthDisableRequest {
} }
message AuthenticateRequest { message AuthenticateRequest {
string name = 1;
string password = 2;
} }
message AuthUserAddRequest { message AuthUserAddRequest {
@ -497,37 +760,63 @@ message AuthUserAddRequest {
} }
message AuthUserGetRequest { message AuthUserGetRequest {
string name = 1;
} }
message AuthUserDeleteRequest { message AuthUserDeleteRequest {
// name is the name of the user to delete.
string name = 1; string name = 1;
} }
message AuthUserChangePasswordRequest { message AuthUserChangePasswordRequest {
// name is the name of the user whose password is being changed.
string name = 1; string name = 1;
// password is the new password for the user.
string password = 2; string password = 2;
} }
message AuthUserGrantRequest { message AuthUserGrantRoleRequest {
// user is the name of the user which should be granted a given role.
string user = 1;
// role is the name of the role to grant to the user.
string role = 2;
} }
message AuthUserRevokeRequest { message AuthUserRevokeRoleRequest {
string name = 1;
string role = 2;
} }
message AuthRoleAddRequest { message AuthRoleAddRequest {
// name is the name of the role to add to the authentication system.
string name = 1; string name = 1;
} }
message AuthRoleGetRequest { message AuthRoleGetRequest {
string role = 1;
}
message AuthUserListRequest {
}
message AuthRoleListRequest {
} }
message AuthRoleDeleteRequest { message AuthRoleDeleteRequest {
string role = 1;
} }
message AuthRoleGrantRequest { message AuthRoleGrantPermissionRequest {
// name is the name of the role which will be granted the permission.
string name = 1;
// perm is the permission to grant to the role.
authpb.Permission perm = 2;
} }
message AuthRoleRevokeRequest { message AuthRoleRevokePermissionRequest {
string role = 1;
string key = 2;
string range_end = 3;
} }
message AuthEnableResponse { message AuthEnableResponse {
@ -540,6 +829,8 @@ message AuthDisableResponse {
message AuthenticateResponse { message AuthenticateResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
// token is an authorized token that can be used in succeeding RPCs
string token = 2;
} }
message AuthUserAddResponse { message AuthUserAddResponse {
@ -548,6 +839,8 @@ message AuthUserAddResponse {
message AuthUserGetResponse { message AuthUserGetResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
repeated string roles = 2;
} }
message AuthUserDeleteResponse { message AuthUserDeleteResponse {
@ -558,11 +851,11 @@ message AuthUserChangePasswordResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }
message AuthUserGrantResponse { message AuthUserGrantRoleResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }
message AuthUserRevokeResponse { message AuthUserRevokeRoleResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }
@ -572,16 +865,30 @@ message AuthRoleAddResponse {
message AuthRoleGetResponse { message AuthRoleGetResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
repeated authpb.Permission perm = 2;
}
message AuthRoleListResponse {
ResponseHeader header = 1;
repeated string roles = 2;
}
message AuthUserListResponse {
ResponseHeader header = 1;
repeated string users = 2;
} }
message AuthRoleDeleteResponse { message AuthRoleDeleteResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }
message AuthRoleGrantResponse { message AuthRoleGrantPermissionResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }
message AuthRoleRevokeResponse { message AuthRoleRevokePermissionResponse {
ResponseHeader header = 1; ResponseHeader header = 1;
} }

View File

@ -1,4 +1,4 @@
// Copyright 2015 CoreOS, Inc. // Copyright 2015 The etcd Authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -25,11 +25,11 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/netutil" "github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft" "github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/storage/backend"
"github.com/coreos/etcd/store" "github.com/coreos/etcd/store"
"github.com/coreos/etcd/version" "github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver" "github.com/coreos/go-semver/semver"
@ -144,9 +144,7 @@ func (c *RaftCluster) PeerURLs() []string {
defer c.Unlock() defer c.Unlock()
urls := make([]string, 0) urls := make([]string, 0)
for _, p := range c.members { for _, p := range c.members {
for _, addr := range p.PeerURLs { urls = append(urls, p.PeerURLs...)
urls = append(urls, addr)
}
} }
sort.Strings(urls) sort.Strings(urls)
return urls return urls
@ -159,9 +157,7 @@ func (c *RaftCluster) ClientURLs() []string {
defer c.Unlock() defer c.Unlock()
urls := make([]string, 0) urls := make([]string, 0)
for _, p := range c.members { for _, p := range c.members {
for _, url := range p.ClientURLs { urls = append(urls, p.ClientURLs...)
urls = append(urls, url)
}
} }
sort.Strings(urls) sort.Strings(urls)
return urls return urls
@ -199,13 +195,19 @@ func (c *RaftCluster) SetID(id types.ID) { c.id = id }
func (c *RaftCluster) SetStore(st store.Store) { c.store = st } func (c *RaftCluster) SetStore(st store.Store) { c.store = st }
func (c *RaftCluster) Recover() { func (c *RaftCluster) SetBackend(be backend.Backend) {
c.be = be
mustCreateBackendBuckets(c.be)
}
func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
c.members, c.removed = membersFromStore(c.store) c.members, c.removed = membersFromStore(c.store)
c.version = clusterVersionFromStore(c.store) c.version = clusterVersionFromStore(c.store)
mustDetectDowngrade(c.version) mustDetectDowngrade(c.version)
onSet(c.version)
for _, m := range c.members { for _, m := range c.members {
plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id) plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
@ -289,6 +291,8 @@ func (c *RaftCluster) AddMember(m *Member) {
} }
c.members[m.ID] = m c.members[m.ID] = m
plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
} }
// RemoveMember removes a member from the store. // RemoveMember removes a member from the store.
@ -305,23 +309,28 @@ func (c *RaftCluster) RemoveMember(id types.ID) {
delete(c.members, id) delete(c.members, id)
c.removed[id] = true c.removed[id] = true
plog.Infof("removed member %s from cluster %s", id, c.id)
} }
func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) bool { func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
if m, ok := c.members[id]; ok { if m, ok := c.members[id]; ok {
m.Attributes = attr m.Attributes = attr
return true if c.store != nil {
mustUpdateMemberAttrInStore(c.store, m)
}
if c.be != nil {
mustSaveMemberToBackend(c.be, m)
}
return
} }
_, ok := c.removed[id] _, ok := c.removed[id]
if ok { if !ok {
plog.Warningf("skipped updating attributes of removed member %s", id)
} else {
plog.Panicf("error updating attributes of unknown member %s", id) plog.Panicf("error updating attributes of unknown member %s", id)
} }
// TODO: update store in this function plog.Warningf("skipped updating attributes of removed member %s", id)
return false
} }
func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) { func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
@ -335,6 +344,8 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes)
if c.be != nil { if c.be != nil {
mustSaveMemberToBackend(c.be, c.members[id]) mustSaveMemberToBackend(c.be, c.members[id])
} }
plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
} }
func (c *RaftCluster) Version() *semver.Version { func (c *RaftCluster) Version() *semver.Version {
@ -346,7 +357,7 @@ func (c *RaftCluster) Version() *semver.Version {
return semver.Must(semver.NewVersion(c.version.String())) return semver.Must(semver.NewVersion(c.version.String()))
} }
func (c *RaftCluster) SetVersion(ver *semver.Version) { func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
if c.version != nil { if c.version != nil {
@ -356,6 +367,13 @@ func (c *RaftCluster) SetVersion(ver *semver.Version) {
} }
c.version = ver c.version = ver
mustDetectDowngrade(c.version) mustDetectDowngrade(c.version)
if c.store != nil {
mustSaveClusterVersionToStore(c.store, ver)
}
if c.be != nil {
mustSaveClusterVersionToBackend(c.be, ver)
}
onSet(ver)
} }
func (c *RaftCluster) IsReadyToAddNewMember() bool { func (c *RaftCluster) IsReadyToAddNewMember() bool {
@ -371,7 +389,7 @@ func (c *RaftCluster) IsReadyToAddNewMember() bool {
if nstarted == 1 && nmembers == 2 { if nstarted == 1 && nmembers == 2 {
// a case of adding a new node to 1-member cluster for restoring cluster data // a case of adding a new node to 1-member cluster for restoring cluster data
// https://github.com/coreos/etcd/blob/master/Documentation/admin_guide.md#restoring-the-cluster // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
plog.Debugf("The number of started member is 1. This cluster can accept add member request.") plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
return true return true

Some files were not shown because too many files have changed in this diff.